@lobehub/lobehub 2.0.0-next.234 → 2.0.0-next.236

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/.devcontainer/devcontainer.json +4 -2
  2. package/CHANGELOG.md +50 -0
  3. package/changelog/v1.json +14 -0
  4. package/locales/ar/components.json +1 -0
  5. package/locales/ar/file.json +4 -0
  6. package/locales/ar/models.json +29 -0
  7. package/locales/ar/setting.json +7 -0
  8. package/locales/bg-BG/components.json +1 -0
  9. package/locales/bg-BG/file.json +4 -0
  10. package/locales/bg-BG/models.json +1 -0
  11. package/locales/bg-BG/setting.json +7 -0
  12. package/locales/de-DE/components.json +1 -0
  13. package/locales/de-DE/file.json +4 -0
  14. package/locales/de-DE/models.json +29 -0
  15. package/locales/de-DE/setting.json +7 -0
  16. package/locales/en-US/common.json +0 -1
  17. package/locales/en-US/components.json +1 -0
  18. package/locales/en-US/file.json +4 -0
  19. package/locales/en-US/models.json +1 -0
  20. package/locales/es-ES/components.json +1 -0
  21. package/locales/es-ES/file.json +4 -0
  22. package/locales/es-ES/models.json +43 -0
  23. package/locales/es-ES/setting.json +7 -0
  24. package/locales/fa-IR/components.json +1 -0
  25. package/locales/fa-IR/file.json +4 -0
  26. package/locales/fa-IR/models.json +54 -0
  27. package/locales/fa-IR/setting.json +7 -0
  28. package/locales/fr-FR/components.json +1 -0
  29. package/locales/fr-FR/file.json +4 -0
  30. package/locales/fr-FR/models.json +31 -0
  31. package/locales/fr-FR/setting.json +7 -0
  32. package/locales/it-IT/components.json +1 -0
  33. package/locales/it-IT/file.json +4 -0
  34. package/locales/it-IT/models.json +43 -0
  35. package/locales/it-IT/setting.json +7 -0
  36. package/locales/ja-JP/components.json +1 -0
  37. package/locales/ja-JP/file.json +4 -0
  38. package/locales/ja-JP/models.json +28 -0
  39. package/locales/ja-JP/setting.json +7 -0
  40. package/locales/ko-KR/components.json +1 -0
  41. package/locales/ko-KR/file.json +4 -0
  42. package/locales/ko-KR/models.json +37 -0
  43. package/locales/ko-KR/setting.json +7 -0
  44. package/locales/nl-NL/components.json +1 -0
  45. package/locales/nl-NL/file.json +4 -0
  46. package/locales/nl-NL/models.json +13 -0
  47. package/locales/nl-NL/setting.json +7 -0
  48. package/locales/pl-PL/components.json +1 -0
  49. package/locales/pl-PL/file.json +4 -0
  50. package/locales/pl-PL/models.json +13 -0
  51. package/locales/pl-PL/setting.json +7 -0
  52. package/locales/pt-BR/components.json +1 -0
  53. package/locales/pt-BR/file.json +4 -0
  54. package/locales/pt-BR/models.json +29 -0
  55. package/locales/pt-BR/setting.json +7 -0
  56. package/locales/ru-RU/components.json +1 -0
  57. package/locales/ru-RU/file.json +4 -0
  58. package/locales/ru-RU/models.json +1 -0
  59. package/locales/ru-RU/setting.json +7 -0
  60. package/locales/tr-TR/components.json +1 -0
  61. package/locales/tr-TR/file.json +4 -0
  62. package/locales/tr-TR/models.json +29 -0
  63. package/locales/tr-TR/setting.json +7 -0
  64. package/locales/vi-VN/components.json +1 -0
  65. package/locales/vi-VN/file.json +4 -0
  66. package/locales/vi-VN/models.json +1 -0
  67. package/locales/vi-VN/setting.json +7 -0
  68. package/locales/zh-CN/models.json +46 -0
  69. package/locales/zh-TW/components.json +1 -0
  70. package/locales/zh-TW/file.json +4 -0
  71. package/locales/zh-TW/models.json +35 -0
  72. package/locales/zh-TW/setting.json +7 -0
  73. package/package.json +1 -1
  74. package/packages/model-bank/src/aiModels/anthropic.ts +0 -30
  75. package/packages/model-bank/src/aiModels/volcengine.ts +2 -1
  76. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +1 -7
  77. package/src/server/routers/lambda/_helpers/resolveContext.ts +8 -8
  78. package/src/server/routers/lambda/agent.ts +1 -1
  79. package/src/server/routers/lambda/aiModel.ts +1 -1
  80. package/src/server/routers/lambda/comfyui.ts +1 -1
  81. package/src/server/routers/lambda/exporter.ts +1 -1
  82. package/src/server/routers/lambda/image.ts +13 -13
  83. package/src/server/routers/lambda/klavis.ts +10 -10
  84. package/src/server/routers/lambda/market/index.ts +6 -6
  85. package/src/server/routers/lambda/message.ts +2 -2
  86. package/src/server/routers/lambda/plugin.ts +1 -1
  87. package/src/server/routers/lambda/ragEval.ts +2 -2
  88. package/src/server/routers/lambda/topic.ts +3 -3
  89. package/src/server/routers/lambda/user.ts +10 -10
  90. package/src/server/routers/lambda/userMemories.ts +6 -6
@@ -400,6 +400,19 @@
  "deepseek/deepseek-r1-0528:free.description": "DeepSeek-R1 verbetert het redeneervermogen aanzienlijk met minimale gelabelde data en genereert een gedachtegang vóór het eindantwoord om de nauwkeurigheid te verhogen.",
  "deepseek/deepseek-r1-distill-llama-70b.description": "DeepSeek R1 Distill Llama 70B is een gedistilleerd LLM gebaseerd op Llama 3.3 70B, fijngestemd met DeepSeek R1-uitvoer om concurrerende prestaties te leveren met grote frontiermodellen.",
  "deepseek/deepseek-r1-distill-llama-8b.description": "DeepSeek R1 Distill Llama 8B is een gedistilleerd LLM gebaseerd op Llama-3.1-8B-Instruct, getraind met DeepSeek R1-uitvoer.",
+ "deepseek/deepseek-r1-distill-qwen-14b.description": "DeepSeek R1 Distill Qwen 14B is een gedistilleerd LLM gebaseerd op Qwen 2.5 14B, getraind met behulp van DeepSeek R1-uitvoer. Het presteert beter dan OpenAI o1-mini op meerdere benchmarks en behaalt toonaangevende resultaten onder dichte modellen. Benchmark hoogtepunten:\nAIME 2024 pass@1: 69,7\nMATH-500 pass@1: 93,9\nCodeForces Rating: 1481\nFijn-afstemming op DeepSeek R1-uitvoer levert concurrerende prestaties ten opzichte van grotere frontiermodellen.",
+ "deepseek/deepseek-r1-distill-qwen-32b.description": "DeepSeek R1 Distill Qwen 32B is een gedistilleerd LLM gebaseerd op Qwen 2.5 32B, getraind met behulp van DeepSeek R1-uitvoer. Het presteert beter dan OpenAI o1-mini op meerdere benchmarks en behaalt toonaangevende resultaten onder dichte modellen. Benchmark hoogtepunten:\nAIME 2024 pass@1: 72,6\nMATH-500 pass@1: 94,3\nCodeForces Rating: 1691\nFijn-afstemming op DeepSeek R1-uitvoer levert concurrerende prestaties ten opzichte van grotere frontiermodellen.",
+ "deepseek/deepseek-r1.description": "DeepSeek R1 is geüpdatet naar DeepSeek-R1-0528. Dankzij meer rekenkracht en algoritmische optimalisaties na de training is het redeneervermogen en de diepgang aanzienlijk verbeterd. Het presteert sterk op benchmarks voor wiskunde, programmeren en algemene logica, en benadert toonaangevende modellen zoals o3 en Gemini 2.5 Pro.",
+ "deepseek/deepseek-r1/community.description": "DeepSeek R1 is het nieuwste open-source model van het DeepSeek-team, met zeer sterke redeneercapaciteiten, vooral op het gebied van wiskunde, codering en logische taken, vergelijkbaar met OpenAI o1.",
+ "deepseek/deepseek-r1:free.description": "DeepSeek-R1 verbetert het redeneervermogen aanzienlijk met minimale gelabelde data en genereert een redeneerketen vóór het uiteindelijke antwoord om de nauwkeurigheid te verhogen.",
+ "deepseek/deepseek-reasoner.description": "DeepSeek-V3 Thinking (reasoner) is het experimentele redeneermodel van DeepSeek, geschikt voor taken met hoge complexiteit.",
+ "deepseek/deepseek-v3.1-base.description": "DeepSeek V3.1 Base is een verbeterde versie van het DeepSeek V3-model.",
+ "deepseek/deepseek-v3.description": "Een snel, algemeen inzetbaar LLM met verbeterd redeneervermogen.",
+ "deepseek/deepseek-v3/community.description": "DeepSeek-V3 betekent een grote doorbraak in redeneersnelheid ten opzichte van eerdere modellen. Het staat bovenaan onder open-source modellen en kan zich meten met de meest geavanceerde gesloten modellen. DeepSeek-V3 maakt gebruik van Multi-Head Latent Attention (MLA) en de DeepSeekMoE-architectuur, beide volledig gevalideerd in DeepSeek-V2. Het introduceert ook een verliesloze hulpsstrategie voor load balancing en een multi-token predictie trainingsdoel voor betere prestaties.",
+ "deepseek_r1.description": "DeepSeek-R1 is een redeneermodel aangedreven door reinforcement learning dat herhaling en leesbaarheid aanpakt. Voorafgaand aan RL wordt cold-start data gebruikt om het redeneervermogen verder te verbeteren. Het evenaart OpenAI-o1 op het gebied van wiskunde, codering en redeneertaken, met zorgvuldig ontworpen training voor betere algehele resultaten.",
+ "deepseek_r1_distill_llama_70b.description": "DeepSeek-R1-Distill-Llama-70B is gedistilleerd van Llama-3.3-70B-Instruct. Als onderdeel van de DeepSeek-R1-serie is het fijn-afgestemd op door DeepSeek-R1 gegenereerde voorbeelden en presteert het sterk in wiskunde, codering en redeneren.",
+ "deepseek_r1_distill_qwen_14b.description": "DeepSeek-R1-Distill-Qwen-14B is gedistilleerd van Qwen2.5-14B en fijn-afgestemd op 800K zorgvuldig geselecteerde voorbeelden gegenereerd door DeepSeek-R1, met sterk redeneervermogen.",
+ "deepseek_r1_distill_qwen_32b.description": "DeepSeek-R1-Distill-Qwen-32B is gedistilleerd van Qwen2.5-32B en fijn-afgestemd op 800K zorgvuldig geselecteerde voorbeelden gegenereerd door DeepSeek-R1, en blinkt uit in wiskunde, codering en redeneren.",
  "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 is een open LLM voor ontwikkelaars, onderzoekers en bedrijven, ontworpen om hen te helpen bij het bouwen, experimenteren en verantwoord opschalen van generatieve AI-ideeën. Als onderdeel van de basis voor wereldwijde gemeenschapsinnovatie is het goed geschikt voor beperkte rekenkracht en middelen, edge-apparaten en snellere trainingstijden.",
  "meta/Llama-3.2-11B-Vision-Instruct.description": "Sterke beeldredenering op afbeeldingen met hoge resolutie, geschikt voor toepassingen voor visueel begrip.",
  "meta/Llama-3.2-90B-Vision-Instruct.description": "Geavanceerde beeldredenering voor toepassingen met visueel begrip en agentfunctionaliteit.",
@@ -127,6 +127,10 @@
  "llm.proxyUrl.title": "API-proxy-URL",
  "llm.waitingForMore": "Er worden <1>meer modellen gepland</1>, blijf op de hoogte",
  "llm.waitingForMoreLinkAriaLabel": "Open het aanvraagformulier voor aanbieders",
+ "marketPublish.forkConfirm.by": "door {{author}}",
+ "marketPublish.forkConfirm.confirm": "Publicatie Bevestigen",
+ "marketPublish.forkConfirm.description": "Je staat op het punt een afgeleide versie te publiceren, gebaseerd op een bestaande agent uit de community. Je nieuwe agent wordt als een afzonderlijk item in de marktplaats geplaatst.",
+ "marketPublish.forkConfirm.title": "Afgeleide Agent Publiceren",
  "marketPublish.modal.changelog.extra": "Beschrijf de belangrijkste wijzigingen en verbeteringen in deze versie",
  "marketPublish.modal.changelog.label": "Wijzigingslogboek",
  "marketPublish.modal.changelog.maxLengthError": "Het wijzigingslogboek mag niet meer dan 500 tekens bevatten",
@@ -524,6 +528,9 @@
  "tools.klavis.servers": "servers",
  "tools.klavis.tools": "tools",
  "tools.klavis.verifyAuth": "Ik heb de authenticatie voltooid",
+ "tools.lobehubSkill.authorize": "Autoriseren",
+ "tools.lobehubSkill.connect": "Verbinden",
+ "tools.lobehubSkill.error": "Fout",
  "tools.notInstalled": "Niet Geïnstalleerd",
  "tools.notInstalledWarning": "Deze vaardigheid is momenteel niet geïnstalleerd, wat de functionaliteit van de agent kan beïnvloeden.",
  "tools.plugins.enabled": "Ingeschakeld: {{num}}",
@@ -99,6 +99,7 @@
  "ModelSwitchPanel.goToSettings": "Przejdź do ustawień",
  "ModelSwitchPanel.manageProvider": "Zarządzaj dostawcą",
  "ModelSwitchPanel.provider": "Dostawca",
+ "ModelSwitchPanel.searchPlaceholder": "Szukaj modeli...",
  "ModelSwitchPanel.title": "Model",
  "ModelSwitchPanel.useModelFrom": "Użyj tego modelu od:",
  "MultiImagesUpload.actions.uploadMore": "Kliknij lub przeciągnij, aby przesłać więcej",
@@ -37,6 +37,7 @@
  "header.actions.notionGuide.title": "Importuj z Notion",
  "header.actions.uploadFile": "Prześlij plik",
  "header.actions.uploadFolder": "Prześlij folder",
+ "header.actions.uploadFolder.creatingFolders": "Tworzenie struktury folderów...",
  "header.newPageButton": "Nowa strona",
  "header.uploadButton": "Prześlij",
  "home.getStarted": "Rozpocznij",
@@ -119,6 +120,8 @@
  "title": "Zasoby",
  "toggleLeftPanel": "Pokaż/Ukryj panel boczny",
  "uploadDock.body.collapse": "Zwiń",
+ "uploadDock.body.item.cancel": "Anuluj",
+ "uploadDock.body.item.cancelled": "Anulowano",
  "uploadDock.body.item.done": "Przesłano",
  "uploadDock.body.item.error": "Błąd przesyłania, spróbuj ponownie",
  "uploadDock.body.item.pending": "Przygotowywanie do przesłania...",
@@ -126,6 +129,7 @@
  "uploadDock.body.item.restTime": "Pozostało {{time}}",
  "uploadDock.fileQueueInfo": "Przesyłanie pierwszych {{count}} plików, {{remaining}} w kolejce",
  "uploadDock.totalCount": "Łącznie {{count}} elementów",
+ "uploadDock.uploadStatus.cancelled": "Przesyłanie anulowane",
  "uploadDock.uploadStatus.error": "Błąd przesyłania",
  "uploadDock.uploadStatus.pending": "Oczekiwanie na przesłanie",
  "uploadDock.uploadStatus.processing": "Przesyłanie",
@@ -400,6 +400,19 @@
  "deepseek/deepseek-r1-0528:free.description": "DeepSeek-R1 znacznie poprawia rozumowanie przy minimalnej ilości oznaczonych danych i generuje łańcuch rozumowania przed odpowiedzią końcową, zwiększając trafność.",
  "deepseek/deepseek-r1-distill-llama-70b.description": "DeepSeek R1 Distill Llama 70B to zdestylowany model LLM oparty na Llama 3.3 70B, dostrojony przy użyciu wyników DeepSeek R1, osiągający konkurencyjne wyniki względem czołowych modeli.",
  "deepseek/deepseek-r1-distill-llama-8b.description": "DeepSeek R1 Distill Llama 8B to zdestylowany model LLM oparty na Llama-3.1-8B-Instruct, trenowany przy użyciu wyników DeepSeek R1.",
+ "deepseek/deepseek-r1-distill-qwen-14b.description": "DeepSeek R1 Distill Qwen 14B to odchudzony model LLM oparty na Qwen 2.5 14B, wytrenowany na danych wyjściowych DeepSeek R1. Przewyższa OpenAI o1-mini w wielu testach porównawczych, osiągając najnowocześniejsze wyniki wśród modeli gęstych. Najważniejsze wyniki benchmarków:\nAIME 2024 pass@1: 69,7\nMATH-500 pass@1: 93,9\nOcena CodeForces: 1481\nDostrajanie na danych DeepSeek R1 zapewnia konkurencyjną wydajność względem większych modeli czołowych.",
+ "deepseek/deepseek-r1-distill-qwen-32b.description": "DeepSeek R1 Distill Qwen 32B to odchudzony model LLM oparty na Qwen 2.5 32B, wytrenowany na danych wyjściowych DeepSeek R1. Przewyższa OpenAI o1-mini w wielu testach porównawczych, osiągając najnowocześniejsze wyniki wśród modeli gęstych. Najważniejsze wyniki benchmarków:\nAIME 2024 pass@1: 72,6\nMATH-500 pass@1: 94,3\nOcena CodeForces: 1691\nDostrajanie na danych DeepSeek R1 zapewnia konkurencyjną wydajność względem większych modeli czołowych.",
+ "deepseek/deepseek-r1.description": "DeepSeek R1 został zaktualizowany do wersji DeepSeek-R1-0528. Dzięki większej mocy obliczeniowej i optymalizacjom algorytmicznym po treningu, znacząco poprawiono głębokość i zdolność rozumowania. Model osiąga wysokie wyniki w testach z zakresu matematyki, programowania i logiki ogólnej, zbliżając się do liderów takich jak o3 i Gemini 2.5 Pro.",
+ "deepseek/deepseek-r1/community.description": "DeepSeek R1 to najnowszy model open-source wydany przez zespół DeepSeek, charakteryzujący się bardzo silnymi zdolnościami rozumowania, szczególnie w zadaniach matematycznych, programistycznych i logicznych, porównywalnymi z OpenAI o1.",
+ "deepseek/deepseek-r1:free.description": "DeepSeek-R1 znacząco poprawia rozumowanie przy minimalnej ilości oznaczonych danych, generując łańcuch rozumowania przed ostateczną odpowiedzią w celu zwiększenia dokładności.",
+ "deepseek/deepseek-reasoner.description": "DeepSeek-V3 Thinking (reasoner) to eksperymentalny model rozumowania od DeepSeek, odpowiedni do zadań wymagających wysokiego poziomu złożoności logicznej.",
+ "deepseek/deepseek-v3.1-base.description": "DeepSeek V3.1 Base to ulepszona wersja modelu DeepSeek V3.",
+ "deepseek/deepseek-v3.description": "Szybki, uniwersalny model LLM z ulepszonymi zdolnościami rozumowania.",
+ "deepseek/deepseek-v3/community.description": "DeepSeek-V3 stanowi przełom w szybkości rozumowania względem poprzednich modeli. Zajmuje pierwsze miejsce wśród modeli open-source i dorównuje najbardziej zaawansowanym modelom zamkniętym. DeepSeek-V3 wykorzystuje Multi-Head Latent Attention (MLA) oraz architekturę DeepSeekMoE, obie w pełni sprawdzone w DeepSeek-V2. Wprowadza również bezstratną strategię pomocniczą dla równoważenia obciążenia oraz cel treningowy oparty na przewidywaniu wielu tokenów dla lepszej wydajności.",
+ "deepseek_r1.description": "DeepSeek-R1 to model rozumowania oparty na uczeniu przez wzmacnianie, który rozwiązuje problemy powtórzeń i czytelności. Przed etapem RL wykorzystuje dane startowe do dalszego zwiększenia zdolności rozumowania. Dorównuje OpenAI-o1 w zadaniach matematycznych, programistycznych i logicznych, a starannie zaprojektowany proces treningowy poprawia ogólne wyniki.",
+ "deepseek_r1_distill_llama_70b.description": "DeepSeek-R1-Distill-Llama-70B to model odchudzony z Llama-3.3-70B-Instruct. Jako część serii DeepSeek-R1, został dostrojony na próbkach wygenerowanych przez DeepSeek-R1 i osiąga wysokie wyniki w matematyce, programowaniu i rozumowaniu.",
+ "deepseek_r1_distill_qwen_14b.description": "DeepSeek-R1-Distill-Qwen-14B to model odchudzony z Qwen2.5-14B, dostrojony na 800 tysiącach starannie wyselekcjonowanych próbek wygenerowanych przez DeepSeek-R1, zapewniający silne zdolności rozumowania.",
+ "deepseek_r1_distill_qwen_32b.description": "DeepSeek-R1-Distill-Qwen-32B to model odchudzony z Qwen2.5-32B, dostrojony na 800 tysiącach starannie wyselekcjonowanych próbek wygenerowanych przez DeepSeek-R1, wyróżniający się w matematyce, programowaniu i rozumowaniu.",
  "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 to otwarty model językowy (LLM) stworzony z myślą o programistach, naukowcach i przedsiębiorstwach, zaprojektowany, by wspierać ich w budowaniu, eksperymentowaniu i odpowiedzialnym skalowaniu pomysłów z zakresu generatywnej sztucznej inteligencji. Jako fundament globalnej innowacji społecznościowej, doskonale sprawdza się przy ograniczonych zasobach obliczeniowych, na urządzeniach brzegowych oraz przy szybszym czasie trenowania.",
  "meta/Llama-3.2-11B-Vision-Instruct.description": "Zaawansowane rozumowanie obrazów w wysokiej rozdzielczości, idealne do aplikacji zrozumienia wizualnego.",
  "meta/Llama-3.2-90B-Vision-Instruct.description": "Zaawansowane rozumowanie obrazów dla aplikacji agentów opartych na zrozumieniu wizualnym.",
@@ -127,6 +127,10 @@
  "llm.proxyUrl.title": "Adres proxy API",
  "llm.waitingForMore": "Więcej modeli <1>wkrótce zostanie dodanych</1>, bądź na bieżąco",
  "llm.waitingForMoreLinkAriaLabel": "Otwórz formularz zgłoszenia dostawcy",
+ "marketPublish.forkConfirm.by": "przez {{author}}",
+ "marketPublish.forkConfirm.confirm": "Potwierdź publikację",
+ "marketPublish.forkConfirm.description": "Zamierzasz opublikować wersję pochodną opartą na istniejącym agencie ze społeczności. Twój nowy agent zostanie utworzony jako osobny wpis w marketplace.",
+ "marketPublish.forkConfirm.title": "Opublikuj agenta pochodnego",
  "marketPublish.modal.changelog.extra": "Opisz kluczowe zmiany i ulepszenia w tej wersji",
  "marketPublish.modal.changelog.label": "Dziennik zmian",
  "marketPublish.modal.changelog.maxLengthError": "Dziennik zmian nie może przekraczać 500 znaków",
@@ -524,6 +528,9 @@
  "tools.klavis.servers": "serwery",
  "tools.klavis.tools": "narzędzia",
  "tools.klavis.verifyAuth": "Ukończyłem uwierzytelnienie",
+ "tools.lobehubSkill.authorize": "Autoryzuj",
+ "tools.lobehubSkill.connect": "Połącz",
+ "tools.lobehubSkill.error": "Błąd",
  "tools.notInstalled": "Nie zainstalowano",
  "tools.notInstalledWarning": "Ta umiejętność nie jest obecnie zainstalowana, co może wpłynąć na funkcjonalność agenta.",
  "tools.plugins.enabled": "Włączone: {{num}}",
@@ -99,6 +99,7 @@
  "ModelSwitchPanel.goToSettings": "Ir para configurações",
  "ModelSwitchPanel.manageProvider": "Gerenciar Provedor",
  "ModelSwitchPanel.provider": "Provedor",
+ "ModelSwitchPanel.searchPlaceholder": "Buscar modelos...",
  "ModelSwitchPanel.title": "Modelo",
  "ModelSwitchPanel.useModelFrom": "Usar este modelo de:",
  "MultiImagesUpload.actions.uploadMore": "Clique ou arraste para enviar mais",
@@ -37,6 +37,7 @@
  "header.actions.notionGuide.title": "Importar do Notion",
  "header.actions.uploadFile": "Enviar Arquivo",
  "header.actions.uploadFolder": "Enviar Pasta",
+ "header.actions.uploadFolder.creatingFolders": "Criando estrutura de pastas...",
  "header.newPageButton": "Nova Página",
  "header.uploadButton": "Enviar",
  "home.getStarted": "Começar",
@@ -119,6 +120,8 @@
  "title": "Recursos",
  "toggleLeftPanel": "Mostrar/Ocultar Painel Esquerdo",
  "uploadDock.body.collapse": "Recolher",
+ "uploadDock.body.item.cancel": "Cancelar",
+ "uploadDock.body.item.cancelled": "Cancelado",
  "uploadDock.body.item.done": "Enviado",
  "uploadDock.body.item.error": "Falha no envio, tente novamente",
  "uploadDock.body.item.pending": "Preparando para envio...",
@@ -126,6 +129,7 @@
  "uploadDock.body.item.restTime": "Restam {{time}}",
  "uploadDock.fileQueueInfo": "Enviando os primeiros {{count}} arquivos, {{remaining}} restantes na fila",
  "uploadDock.totalCount": "Total de {{count}} itens",
+ "uploadDock.uploadStatus.cancelled": "Envio cancelado",
  "uploadDock.uploadStatus.error": "Erro no envio",
  "uploadDock.uploadStatus.pending": "Aguardando envio",
  "uploadDock.uploadStatus.processing": "Enviando",
@@ -413,6 +413,35 @@
  "deepseek_r1_distill_llama_70b.description": "DeepSeek-R1-Distill-Llama-70B é destilado do Llama-3.3-70B-Instruct. Como parte da série DeepSeek-R1, é ajustado com amostras geradas pelo DeepSeek-R1 e apresenta forte desempenho em matemática, programação e raciocínio.",
  "deepseek_r1_distill_qwen_14b.description": "DeepSeek-R1-Distill-Qwen-14B é destilado do Qwen2.5-14B e ajustado com 800 mil amostras selecionadas geradas pelo DeepSeek-R1, oferecendo raciocínio robusto.",
  "deepseek_r1_distill_qwen_32b.description": "DeepSeek-R1-Distill-Qwen-32B é destilado do Qwen2.5-32B e ajustado com 800 mil amostras selecionadas geradas pelo DeepSeek-R1, destacando-se em matemática, programação e raciocínio.",
+ "devstral-2:123b.description": "O Devstral 2 123B se destaca no uso de ferramentas para explorar bases de código, editar múltiplos arquivos e oferecer suporte a agentes de engenharia de software.",
+ "doubao-1.5-lite-32k.description": "O Doubao-1.5-lite é um novo modelo leve com resposta ultrarrápida, oferecendo qualidade e latência de alto nível.",
+ "doubao-1.5-pro-256k.description": "O Doubao-1.5-pro-256k é uma atualização abrangente do Doubao-1.5-Pro, com melhoria de 10% no desempenho geral. Suporta uma janela de contexto de 256k e até 12k tokens de saída, oferecendo maior desempenho, janela expandida e excelente custo-benefício para casos de uso mais amplos.",
+ "doubao-1.5-pro-32k.description": "O Doubao-1.5-pro é um modelo carro-chefe de nova geração com melhorias em todas as áreas, destacando-se em conhecimento, programação e raciocínio.",
+ "doubao-1.5-thinking-pro-m.description": "O Doubao-1.5 é um novo modelo de raciocínio profundo (a versão m inclui raciocínio multimodal nativo) que se destaca em matemática, programação, raciocínio científico e tarefas gerais como escrita criativa. Alcança ou se aproxima dos melhores resultados em benchmarks como AIME 2024, Codeforces e GPQA. Suporta uma janela de contexto de 128k e saída de até 16k tokens.",
+ "doubao-1.5-thinking-pro.description": "O Doubao-1.5 é um novo modelo de raciocínio profundo que se destaca em matemática, programação, raciocínio científico e tarefas gerais como escrita criativa. Alcança ou se aproxima dos melhores resultados em benchmarks como AIME 2024, Codeforces e GPQA. Suporta uma janela de contexto de 128k e saída de até 16k tokens.",
+ "doubao-1.5-thinking-vision-pro.description": "Um novo modelo visual de raciocínio profundo com compreensão e raciocínio multimodal aprimorados, alcançando resultados SOTA em 37 de 59 benchmarks públicos.",
+ "doubao-1.5-ui-tars.description": "O Doubao-1.5-UI-TARS é um modelo de agente com foco nativo em interfaces gráficas, interagindo perfeitamente com interfaces por meio de percepção, raciocínio e ação semelhantes às humanas.",
+ "doubao-1.5-vision-lite.description": "O Doubao-1.5-vision-lite é um modelo multimodal aprimorado que suporta imagens em qualquer resolução e proporções extremas, melhorando o raciocínio visual, reconhecimento de documentos, compreensão de detalhes e seguimento de instruções. Suporta uma janela de contexto de 128k e até 16k tokens de saída.",
+ "doubao-1.5-vision-pro-32k.description": "O Doubao-1.5-vision-pro é um modelo multimodal aprimorado que suporta imagens em qualquer resolução e proporções extremas, melhorando o raciocínio visual, reconhecimento de documentos, compreensão de detalhes e seguimento de instruções.",
+ "doubao-1.5-vision-pro.description": "O Doubao-1.5-vision-pro é um modelo multimodal aprimorado que suporta imagens em qualquer resolução e proporções extremas, melhorando o raciocínio visual, reconhecimento de documentos, compreensão de detalhes e seguimento de instruções.",
+ "doubao-lite-128k.description": "Resposta ultrarrápida com melhor custo-benefício, oferecendo mais flexibilidade em diversos cenários. Suporta raciocínio e ajuste fino com janela de contexto de 128k.",
+ "doubao-lite-32k.description": "Resposta ultrarrápida com melhor custo-benefício, oferecendo mais flexibilidade em diversos cenários. Suporta raciocínio e ajuste fino com janela de contexto de 32k.",
+ "doubao-lite-4k.description": "Resposta ultrarrápida com melhor custo-benefício, oferecendo mais flexibilidade em diversos cenários. Suporta raciocínio e ajuste fino com janela de contexto de 4k.",
+ "doubao-pro-256k.description": "O modelo carro-chefe com melhor desempenho para tarefas complexas, com excelentes resultados em QA com referência, sumarização, criação, classificação de texto e simulação de papéis. Suporta raciocínio e ajuste fino com janela de contexto de 256k.",
+ "doubao-pro-32k.description": "O modelo carro-chefe com melhor desempenho para tarefas complexas, com excelentes resultados em QA com referência, sumarização, criação, classificação de texto e simulação de papéis. Suporta raciocínio e ajuste fino com janela de contexto de 32k.",
+ "doubao-seed-1.6-flash.description": "O Doubao-Seed-1.6-flash é um modelo multimodal de raciocínio profundo ultrarrápido com TPOT de até 10ms. Suporta entrada de texto e imagem, supera o modelo lite anterior em compreensão de texto e se equipara aos modelos pro concorrentes em visão. Suporta janela de contexto de 256k e até 16k tokens de saída.",
+ "doubao-seed-1.6-lite.description": "O Doubao-Seed-1.6-lite é um novo modelo multimodal de raciocínio profundo com esforço de raciocínio ajustável (Mínimo, Baixo, Médio, Alto), oferecendo melhor custo-benefício e sendo uma escolha sólida para tarefas comuns, com janela de contexto de até 256k.",
+ "doubao-seed-1.6-thinking.description": "O Doubao-Seed-1.6-thinking fortalece significativamente o raciocínio, melhorando ainda mais as habilidades centrais em programação, matemática e raciocínio lógico em relação ao Doubao-1.5-thinking-pro, além de adicionar compreensão visual. Suporta janela de contexto de 256k e até 16k tokens de saída.",
+ "doubao-seed-1.6-vision.description": "O Doubao-Seed-1.6-vision é um modelo visual de raciocínio profundo que oferece compreensão e raciocínio multimodal mais robustos para educação, revisão de imagens, inspeção/segurança e perguntas e respostas com busca por IA. Suporta janela de contexto de 256k e até 64k tokens de saída.",
+ "doubao-seed-1.6.description": "O Doubao-Seed-1.6 é um novo modelo multimodal de raciocínio profundo com modos automático, com raciocínio e sem raciocínio. No modo sem raciocínio, supera significativamente o Doubao-1.5-pro/250115. Suporta janela de contexto de 256k e até 16k tokens de saída.",
+ "doubao-seed-1.8.description": "O Doubao-Seed-1.8 possui capacidades aprimoradas de compreensão multimodal e de agentes, suportando entrada de texto/imagem/vídeo e cache de contexto, oferecendo desempenho superior em tarefas complexas.",
+ "doubao-seed-code.description": "O Doubao-Seed-Code é profundamente otimizado para programação com agentes, suporta entradas multimodais (texto/imagem/vídeo) e janela de contexto de 256k, é compatível com a API da Anthropic e adequado para fluxos de trabalho de programação, compreensão visual e agentes.",
+ "doubao-seededit-3-0-i2i-250628.description": "O modelo de imagem Doubao da ByteDance Seed suporta entradas de texto e imagem com geração de imagem altamente controlável e de alta qualidade. Suporta edição de imagem guiada por texto, com tamanhos de saída entre 512 e 1536 no lado mais longo.",
+ "doubao-seedream-3-0-t2i-250415.description": "O Seedream 3.0 é um modelo de geração de imagem da ByteDance Seed, que suporta entradas de texto e imagem com geração de imagem altamente controlável e de alta qualidade. Gera imagens a partir de comandos de texto.",
+ "doubao-seedream-4-0-250828.description": "O Seedream 4.0 é um modelo de geração de imagem da ByteDance Seed, que suporta entradas de texto e imagem com geração de imagem altamente controlável e de alta qualidade. Gera imagens a partir de comandos de texto.",
+ "doubao-vision-lite-32k.description": "O Doubao-vision é um modelo multimodal da Doubao com forte compreensão e raciocínio de imagens, além de seguir instruções com precisão. Tem bom desempenho em tarefas de extração imagem-texto e raciocínio baseado em imagem, permitindo cenários de QA visual mais complexos e amplos.",
+ "doubao-vision-pro-32k.description": "O Doubao-vision é um modelo multimodal da Doubao com forte compreensão e raciocínio de imagens, além de seguir instruções com precisão. Tem bom desempenho em tarefas de extração imagem-texto e raciocínio baseado em imagem, permitindo cenários de QA visual mais complexos e amplos.",
+ "emohaa.description": "O Emohaa é um modelo voltado para saúde mental com habilidades profissionais de aconselhamento para ajudar os usuários a compreender questões emocionais.",
  "meta.llama3-8b-instruct-v1:0.description": "O Meta Llama 3 é um modelo de linguagem aberto para desenvolvedores, pesquisadores e empresas, projetado para ajudá-los a construir, experimentar e escalar ideias de IA generativa de forma responsável. Como parte da base para a inovação da comunidade global, é ideal para ambientes com recursos computacionais limitados, dispositivos de borda e tempos de treinamento mais rápidos.",
  "mistral-large-latest.description": "Mistral Large é o modelo principal, com excelente desempenho em tarefas multilíngues, raciocínio complexo e geração de código — ideal para aplicações de alto nível.",
  "mistral-large.description": "Mixtral Large é o modelo principal da Mistral, combinando geração de código, matemática e raciocínio com uma janela de contexto de 128K.",
@@ -127,6 +127,10 @@
  "llm.proxyUrl.title": "URL do Proxy da API",
  "llm.waitingForMore": "Mais modelos estão <1>planejados para serem adicionados</1>, fique atento",
  "llm.waitingForMoreLinkAriaLabel": "Abrir formulário de solicitação de provedor",
+ "marketPublish.forkConfirm.by": "por {{author}}",
+ "marketPublish.forkConfirm.confirm": "Confirmar Publicação",
+ "marketPublish.forkConfirm.description": "Você está prestes a publicar uma versão derivada com base em um agente existente da comunidade. Seu novo agente será criado como uma entrada separada no marketplace.",
+ "marketPublish.forkConfirm.title": "Publicar Agente Derivado",
  "marketPublish.modal.changelog.extra": "Descreva as principais mudanças e melhorias nesta versão",
  "marketPublish.modal.changelog.label": "Registro de alterações",
  "marketPublish.modal.changelog.maxLengthError": "O registro de alterações não pode exceder 500 caracteres",
@@ -524,6 +528,9 @@
  "tools.klavis.servers": "servidores",
  "tools.klavis.tools": "ferramentas",
  "tools.klavis.verifyAuth": "Concluí a autenticação",
+ "tools.lobehubSkill.authorize": "Autorizar",
+ "tools.lobehubSkill.connect": "Conectar",
+ "tools.lobehubSkill.error": "Erro",
  "tools.notInstalled": "Não Instalado",
  "tools.notInstalledWarning": "Esta habilidade não está instalada atualmente, o que pode afetar a funcionalidade do agente.",
  "tools.plugins.enabled": "Ativadas: {{num}}",
@@ -99,6 +99,7 @@
  "ModelSwitchPanel.goToSettings": "Перейти в настройки",
  "ModelSwitchPanel.manageProvider": "Управление провайдером",
  "ModelSwitchPanel.provider": "Провайдер",
+ "ModelSwitchPanel.searchPlaceholder": "Поиск моделей...",
  "ModelSwitchPanel.title": "Модель",
  "ModelSwitchPanel.useModelFrom": "Использовать эту модель от:",
  "MultiImagesUpload.actions.uploadMore": "Нажмите или перетащите для загрузки дополнительных изображений",
@@ -37,6 +37,7 @@
  "header.actions.notionGuide.title": "Импорт из Notion",
  "header.actions.uploadFile": "Загрузить файл",
  "header.actions.uploadFolder": "Загрузить папку",
+ "header.actions.uploadFolder.creatingFolders": "Создание структуры папок...",
  "header.newPageButton": "Новая страница",
  "header.uploadButton": "Загрузить",
  "home.getStarted": "Начать",
@@ -119,6 +120,8 @@
  "title": "Ресурсы",
  "toggleLeftPanel": "Показать/Скрыть левую панель",
  "uploadDock.body.collapse": "Свернуть",
+ "uploadDock.body.item.cancel": "Отменить",
+ "uploadDock.body.item.cancelled": "Отменено",
  "uploadDock.body.item.done": "Загружено",
  "uploadDock.body.item.error": "Ошибка загрузки, попробуйте снова",
  "uploadDock.body.item.pending": "Подготовка к загрузке...",
@@ -126,6 +129,7 @@
  "uploadDock.body.item.restTime": "Осталось {{time}}",
  "uploadDock.fileQueueInfo": "Загружаются первые {{count}} файлов, {{remaining}} в очереди",
  "uploadDock.totalCount": "Всего {{count}} элементов",
+ "uploadDock.uploadStatus.cancelled": "Загрузка отменена",
  "uploadDock.uploadStatus.error": "Ошибка загрузки",
  "uploadDock.uploadStatus.pending": "Ожидание загрузки",
  "uploadDock.uploadStatus.processing": "Загрузка",
@@ -413,6 +413,7 @@
  "deepseek_r1_distill_llama_70b.description": "DeepSeek-R1-Distill-Llama-70B — дистиллированная модель на основе Llama-3.3-70B-Instruct. Является частью серии DeepSeek-R1, дообучена на выборках, сгенерированных DeepSeek-R1, и демонстрирует высокие результаты в математике, программировании и логике.",
  "deepseek_r1_distill_qwen_14b.description": "DeepSeek-R1-Distill-Qwen-14B — дистиллированная модель на основе Qwen2.5-14B, дообученная на 800K отобранных выборках, сгенерированных DeepSeek-R1, обеспечивая высокое качество логического мышления.",
  "deepseek_r1_distill_qwen_32b.description": "DeepSeek-R1-Distill-Qwen-32B — дистиллированная модель на основе Qwen2.5-32B, дообученная на 800K отобранных выборках, сгенерированных DeepSeek-R1, превосходящая в математике, программировании и логике.",
+ "devstral-2:123b.description": "Devstral 2 123B превосходно использует инструменты для анализа кодовой базы, редактирования нескольких файлов и поддержки агентов в области программной инженерии.",
  "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 — это открытая LLM для разработчиков, исследователей и предприятий, созданная для поддержки создания, экспериментов и ответственного масштабирования идей генеративного ИИ. Являясь частью основы для глобальных инноваций сообщества, она хорошо подходит для ограниченных вычислительных ресурсов, устройств на периферии и ускоренного обучения.",
  "meta/Llama-3.2-11B-Vision-Instruct.description": "Модель с высокой способностью к визуальному рассуждению на изображениях высокого разрешения, подходящая для приложений визуального понимания.",
  "meta/Llama-3.2-90B-Vision-Instruct.description": "Продвинутая модель визуального рассуждения для агентов, ориентированных на визуальное понимание.",
@@ -127,6 +127,10 @@
  "llm.proxyUrl.title": "URL прокси API",
  "llm.waitingForMore": "Планируется <1>добавление новых моделей</1>, следите за обновлениями",
  "llm.waitingForMoreLinkAriaLabel": "Открыть форму запроса провайдера",
+ "marketPublish.forkConfirm.by": "от {{author}}",
+ "marketPublish.forkConfirm.confirm": "Подтвердить публикацию",
+ "marketPublish.forkConfirm.description": "Вы собираетесь опубликовать производную версию, основанную на существующем агенте из сообщества. Ваш новый агент будет создан как отдельная запись на маркетплейсе.",
+ "marketPublish.forkConfirm.title": "Публикация производного агента",
  "marketPublish.modal.changelog.extra": "Опишите ключевые изменения и улучшения в этой версии",
  "marketPublish.modal.changelog.label": "Список изменений",
  "marketPublish.modal.changelog.maxLengthError": "Список изменений не должен превышать 500 символов",
@@ -524,6 +528,9 @@
  "tools.klavis.servers": "серверы",
  "tools.klavis.tools": "инструменты",
  "tools.klavis.verifyAuth": "Я завершил аутентификацию",
+ "tools.lobehubSkill.authorize": "Авторизовать",
+ "tools.lobehubSkill.connect": "Подключить",
+ "tools.lobehubSkill.error": "Ошибка",
  "tools.notInstalled": "Не установлено",
  "tools.notInstalledWarning": "Этот навык не установлен, что может повлиять на работу агента.",
  "tools.plugins.enabled": "Включено: {{num}}",
@@ -99,6 +99,7 @@
  "ModelSwitchPanel.goToSettings": "Ayarlar'a git",
  "ModelSwitchPanel.manageProvider": "Sağlayıcıyı Yönet",
  "ModelSwitchPanel.provider": "Sağlayıcı",
+ "ModelSwitchPanel.searchPlaceholder": "Modelleri ara...",
  "ModelSwitchPanel.title": "Model",
  "ModelSwitchPanel.useModelFrom": "Bu modelin kullanıldığı yer:",
  "MultiImagesUpload.actions.uploadMore": "Daha fazla yüklemek için tıklayın veya sürükleyin",
@@ -37,6 +37,7 @@
  "header.actions.notionGuide.title": "Notion'dan İçe Aktar",
  "header.actions.uploadFile": "Dosya Yükle",
  "header.actions.uploadFolder": "Klasör Yükle",
+ "header.actions.uploadFolder.creatingFolders": "Klasör yapısı oluşturuluyor...",
  "header.newPageButton": "Yeni Sayfa",
  "header.uploadButton": "Yükle",
  "home.getStarted": "Başlayın",
@@ -119,6 +120,8 @@
  "title": "Kaynaklar",
  "toggleLeftPanel": "Sol Paneli Göster/Gizle",
  "uploadDock.body.collapse": "Daralt",
+ "uploadDock.body.item.cancel": "İptal Et",
+ "uploadDock.body.item.cancelled": "İptal Edildi",
  "uploadDock.body.item.done": "Yüklendi",
  "uploadDock.body.item.error": "Yükleme başarısız, lütfen tekrar deneyin",
  "uploadDock.body.item.pending": "Yüklemeye hazırlanıyor...",
@@ -126,6 +129,7 @@
  "uploadDock.body.item.restTime": "Kalan süre: {{time}}",
  "uploadDock.fileQueueInfo": "İlk {{count}} dosya yükleniyor, sırada {{remaining}} dosya kaldı",
  "uploadDock.totalCount": "Toplam {{count}} öğe",
+ "uploadDock.uploadStatus.cancelled": "Yükleme iptal edildi",
  "uploadDock.uploadStatus.error": "Yükleme hatası",
  "uploadDock.uploadStatus.pending": "Yükleme bekleniyor",
  "uploadDock.uploadStatus.processing": "Yükleniyor",
@@ -413,6 +413,35 @@
  "deepseek_r1_distill_llama_70b.description": "DeepSeek-R1-Distill-Llama-70B, Llama-3.3-70B-Instruct'tan damıtılmıştır. DeepSeek-R1 serisinin bir parçası olarak, DeepSeek-R1 tarafından üretilen örneklerle ince ayar yapılmıştır ve matematik, kodlama ve akıl yürütme alanlarında güçlü performans sunar.",
  "deepseek_r1_distill_qwen_14b.description": "DeepSeek-R1-Distill-Qwen-14B, Qwen2.5-14B'den damıtılmıştır ve DeepSeek-R1 tarafından üretilen 800K seçilmiş örnekle ince ayar yapılmıştır. Güçlü akıl yürütme yetenekleri sunar.",
  "deepseek_r1_distill_qwen_32b.description": "DeepSeek-R1-Distill-Qwen-32B, Qwen2.5-32B'den damıtılmıştır ve DeepSeek-R1 tarafından üretilen 800K seçilmiş örnekle ince ayar yapılmıştır. Matematik, kodlama ve akıl yürütme alanlarında üstün performans gösterir.",
+ "devstral-2:123b.description": "Devstral 2 123B, kod tabanlarını keşfetme, birden fazla dosyayı düzenleme ve yazılım mühendisliği ajanlarını destekleme konusunda araçları etkili bir şekilde kullanma yeteneğiyle öne çıkar.",
+ "doubao-1.5-lite-32k.description": "Doubao-1.5-lite, ultra hızlı yanıt süresiyle hafif bir modeldir ve üst düzey kalite ile düşük gecikme sunar.",
+ "doubao-1.5-pro-256k.description": "Doubao-1.5-pro-256k, Doubao-1.5-Pro'nun kapsamlı bir yükseltmesidir ve genel performansta %10 artış sağlar. 256k bağlam penceresini ve 12k'ya kadar çıktı belirtecini destekler; daha yüksek performans, daha geniş pencere ve daha fazla kullanım senaryosu için güçlü bir değer sunar.",
+ "doubao-1.5-pro-32k.description": "Doubao-1.5-pro, bilgi, kodlama ve akıl yürütme alanlarında üstün performans gösteren yeni nesil amiral gemisi modeldir.",
+ "doubao-1.5-thinking-pro-m.description": "Doubao-1.5, matematik, kodlama, bilimsel akıl yürütme ve yaratıcı yazım gibi genel görevlerde üstün performans gösteren yeni bir derin akıl yürütme modelidir (m versiyonu yerel çok modlu derin akıl yürütme içerir). AIME 2024, Codeforces ve GPQA gibi kıyaslamalarda en üst düzey sonuçlara ulaşır veya yaklaşır. 128k bağlam penceresi ve 16k çıktı desteği sunar.",
+ "doubao-1.5-thinking-pro.description": "Doubao-1.5, matematik, kodlama, bilimsel akıl yürütme ve yaratıcı yazım gibi genel görevlerde üstün performans gösteren yeni bir derin akıl yürütme modelidir. AIME 2024, Codeforces ve GPQA gibi kıyaslamalarda en üst düzey sonuçlara ulaşır veya yaklaşır. 128k bağlam penceresi ve 16k çıktı desteği sunar.",
+ "doubao-1.5-thinking-vision-pro.description": "Görsel akıl yürütme ve çok modlu anlama alanlarında daha güçlü performans sunan yeni bir görsel derin akıl yürütme modeli; 59 halka açık kıyaslamanın 37'sinde SOTA sonuçlar elde eder.",
+ "doubao-1.5-ui-tars.description": "Doubao-1.5-UI-TARS, insan benzeri algı, akıl yürütme ve eylem yoluyla arayüzlerle sorunsuz etkileşim kuran yerel GUI odaklı bir ajan modelidir.",
+ "doubao-1.5-vision-lite.description": "Doubao-1.5-vision-lite, herhangi bir çözünürlükte ve uç oranlarda görselleri destekleyen, görsel akıl yürütme, belge tanıma, detay anlama ve talimat takibini geliştiren yükseltilmiş bir çok modlu modeldir. 128k bağlam penceresi ve 16k'ya kadar çıktı belirteci desteği sunar.",
+ "doubao-1.5-vision-pro-32k.description": "Doubao-1.5-vision-pro, herhangi bir çözünürlükte ve uç oranlarda görselleri destekleyen, görsel akıl yürütme, belge tanıma, detay anlama ve talimat takibini geliştiren yükseltilmiş bir çok modlu modeldir.",
+ "doubao-1.5-vision-pro.description": "Doubao-1.5-vision-pro, herhangi bir çözünürlükte ve uç oranlarda görselleri destekleyen, görsel akıl yürütme, belge tanıma, detay anlama ve talimat takibini geliştiren yükseltilmiş bir çok modlu modeldir.",
+ "doubao-lite-128k.description": "Ultra hızlı yanıt süresi ve daha iyi değer sunar, farklı senaryolarda esnek seçenekler sağlar. 128k bağlam penceresiyle akıl yürütme ve ince ayar desteklenir.",
+ "doubao-lite-32k.description": "Ultra hızlı yanıt süresi ve daha iyi değer sunar, farklı senaryolarda esnek seçenekler sağlar. 32k bağlam penceresiyle akıl yürütme ve ince ayar desteklenir.",
+ "doubao-lite-4k.description": "Ultra hızlı yanıt süresi ve daha iyi değer sunar, farklı senaryolarda esnek seçenekler sağlar. 4k bağlam penceresiyle akıl yürütme ve ince ayar desteklenir.",
+ "doubao-pro-256k.description": "Referans Soru-Cevap, özetleme, içerik üretimi, metin sınıflandırma ve rol yapma gibi karmaşık görevlerde en iyi performansı sunan amiral gemisi modeldir. 256k bağlam penceresiyle akıl yürütme ve ince ayar desteklenir.",
+ "doubao-pro-32k.description": "Referans Soru-Cevap, özetleme, içerik üretimi, metin sınıflandırma ve rol yapma gibi karmaşık görevlerde en iyi performansı sunan amiral gemisi modeldir. 32k bağlam penceresiyle akıl yürütme ve ince ayar desteklenir.",
+ "doubao-seed-1.6-flash.description": "Doubao-Seed-1.6-flash, TPOT süresi 10ms kadar düşük olan ultra hızlı çok modlu derin akıl yürütme modelidir. Metin ve görseli destekler, metin anlama açısından önceki lite modelin ötesine geçer ve görselde rakip pro modellerle eşleşir. 256k bağlam penceresi ve 16k'ya kadar çıktı belirteci desteği sunar.",
+ "doubao-seed-1.6-lite.description": "Doubao-Seed-1.6-lite, ayarlanabilir akıl yürütme düzeyine (Minimal, Düşük, Orta, Yüksek) sahip yeni bir çok modlu derin akıl yürütme modelidir. Yaygın görevler için daha iyi değer sunar ve 256k'ya kadar bağlam penceresini destekler.",
+ "doubao-seed-1.6-thinking.description": "Doubao-Seed-1.6-thinking, Doubao-1.5-thinking-pro'ya kıyasla kodlama, matematik ve mantıksal akıl yürütme gibi temel yetenekleri önemli ölçüde geliştirir ve görsel anlama yeteneği ekler. 256k bağlam penceresi ve 16k'ya kadar çıktı belirteci desteği sunar.",
+ "doubao-seed-1.6-vision.description": "Doubao-Seed-1.6-vision, eğitim, görsel inceleme, güvenlik ve yapay zeka arama Soru-Cevap gibi alanlarda daha güçlü çok modlu anlama ve akıl yürütme sunan görsel derin akıl yürütme modelidir. 256k bağlam penceresi ve 64k'ya kadar çıktı belirteci desteği sunar.",
+ "doubao-seed-1.6.description": "Doubao-Seed-1.6, otomatik, düşünme ve düşünmeyen modlara sahip yeni bir çok modlu derin akıl yürütme modelidir. Düşünmeyen modda, Doubao-1.5-pro/250115'e kıyasla önemli ölçüde daha iyi performans gösterir. 256k bağlam penceresi ve 16k'ya kadar çıktı belirteci desteği sunar.",
+ "doubao-seed-1.8.description": "Doubao-Seed-1.8, daha güçlü çok modlu anlama ve Ajan yeteneklerine sahiptir; metin/görsel/video girişi ve bağlam önbelleğini destekler, karmaşık görevlerde üstün performans sunar.",
+ "doubao-seed-code.description": "Doubao-Seed-Code, ajan tabanlı kodlama için derinlemesine optimize edilmiştir; çok modlu girişleri (metin/görsel/video) ve 256k bağlam penceresini destekler, Anthropic API ile uyumludur ve kodlama, görsel anlama ve ajan iş akışları için uygundur.",
+ "doubao-seededit-3-0-i2i-250628.description": "ByteDance Seed tarafından geliştirilen Doubao görsel modeli, metin ve görsel girişlerini destekler ve yüksek kaliteli, kontrol edilebilir görsel üretimi sunar. Metinle yönlendirilen görsel düzenlemeyi destekler ve çıktı boyutları uzun kenarda 512 ile 1536 arasında değişir.",
+ "doubao-seedream-3-0-t2i-250415.description": "Seedream 3.0, ByteDance Seed tarafından geliştirilen bir görsel üretim modelidir; metin ve görsel girişlerini destekler, yüksek kaliteli ve kontrol edilebilir görseller üretir. Metin istemlerinden görseller oluşturur.",
+ "doubao-seedream-4-0-250828.description": "Seedream 4.0, ByteDance Seed tarafından geliştirilen bir görsel üretim modelidir; metin ve görsel girişlerini destekler, yüksek kaliteli ve kontrol edilebilir görseller üretir. Metin istemlerinden görseller oluşturur.",
+ "doubao-vision-lite-32k.description": "Doubao-vision, güçlü görsel anlama ve akıl yürütme yeteneklerine sahip bir Doubao çok modlu modelidir; doğru talimat takibiyle birlikte görsel-metin çıkarımı ve görsel tabanlı akıl yürütme görevlerinde başarılıdır. Daha karmaşık ve geniş görsel Soru-Cevap senaryolarını mümkün kılar.",
+ "doubao-vision-pro-32k.description": "Doubao-vision, güçlü görsel anlama ve akıl yürütme yeteneklerine sahip bir Doubao çok modlu modelidir; doğru talimat takibiyle birlikte görsel-metin çıkarımı ve görsel tabanlı akıl yürütme görevlerinde başarılıdır. Daha karmaşık ve geniş görsel Soru-Cevap senaryolarını mümkün kılar.",
+ "emohaa.description": "Emohaa, kullanıcıların duygusal sorunları anlamalarına yardımcı olmak için profesyonel danışmanlık yeteneklerine sahip bir ruh sağlığı modelidir.",
  "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3, geliştiriciler, araştırmacılar ve işletmeler için açık bir büyük dil modeli (LLM) olup, üretken yapay zeka fikirlerini oluşturma, deneme ve sorumlu bir şekilde ölçeklendirme süreçlerinde yardımcı olmak üzere tasarlanmıştır. Küresel topluluk inovasyonunun temel taşlarından biri olarak, sınırlı bilgi işlem gücü ve kaynaklara sahip ortamlar, uç cihazlar ve daha hızlı eğitim süreleri için uygundur.",
  "mistral-small-latest.description": "Mistral Small, çeviri, özetleme ve duygu analizi için uygun maliyetli, hızlı ve güvenilir bir seçenektir.",
  "mistral-small.description": "Mistral Small, yüksek verimlilik ve düşük gecikme gerektiren her türlü dil tabanlı görev için uygundur.",
@@ -127,6 +127,10 @@
  "llm.proxyUrl.title": "API Proxy URL'si",
  "llm.waitingForMore": "Daha fazla model <1>eklenmek üzere planlanıyor</1>, bizi takipte kalın",
  "llm.waitingForMoreLinkAriaLabel": "Sağlayıcı istek formunu aç",
+ "marketPublish.forkConfirm.by": "{{author}} tarafından",
+ "marketPublish.forkConfirm.confirm": "Yayınlamayı Onayla",
+ "marketPublish.forkConfirm.description": "Topluluktaki mevcut bir ajana dayalı türev bir sürüm yayınlamak üzeresiniz. Yeni ajanınız, pazaryerinde ayrı bir giriş olarak oluşturulacaktır.",
+ "marketPublish.forkConfirm.title": "Türev Ajanı Yayınla",
  "marketPublish.modal.changelog.extra": "Bu sürümdeki önemli değişiklikleri ve iyileştirmeleri açıklayın",
  "marketPublish.modal.changelog.label": "Değişiklik Günlüğü",
  "marketPublish.modal.changelog.maxLengthError": "Değişiklik günlüğü 500 karakteri geçmemelidir",
@@ -524,6 +528,9 @@
  "tools.klavis.servers": "sunucular",
  "tools.klavis.tools": "araçlar",
  "tools.klavis.verifyAuth": "Kimlik doğrulamasını tamamladım",
+ "tools.lobehubSkill.authorize": "Yetkilendir",
+ "tools.lobehubSkill.connect": "Bağlan",
+ "tools.lobehubSkill.error": "Hata",
  "tools.notInstalled": "Yüklü Değil",
  "tools.notInstalledWarning": "Bu yetenek şu anda yüklü değil, bu temsilcinin işlevselliğini etkileyebilir.",
  "tools.plugins.enabled": "Etkin: {{num}}",
@@ -99,6 +99,7 @@
  "ModelSwitchPanel.goToSettings": "Đi tới cài đặt",
  "ModelSwitchPanel.manageProvider": "Quản Lý Nhà Cung Cấp",
  "ModelSwitchPanel.provider": "Nhà cung cấp",
+ "ModelSwitchPanel.searchPlaceholder": "Tìm kiếm mô hình...",
  "ModelSwitchPanel.title": "Mô hình",
  "ModelSwitchPanel.useModelFrom": "Sử dụng mô hình này từ:",
  "MultiImagesUpload.actions.uploadMore": "Nhấn hoặc kéo để tải thêm",
@@ -37,6 +37,7 @@
  "header.actions.notionGuide.title": "Nhập từ Notion",
  "header.actions.uploadFile": "Tải Lên Tệp",
  "header.actions.uploadFolder": "Tải Lên Thư Mục",
+ "header.actions.uploadFolder.creatingFolders": "Đang tạo cấu trúc thư mục...",
  "header.newPageButton": "Trang Mới",
  "header.uploadButton": "Tải Lên",
  "home.getStarted": "Bắt Đầu",
@@ -119,6 +120,8 @@
  "title": "Tài Nguyên",
  "toggleLeftPanel": "Hiện/Ẩn Bảng Bên Trái",
  "uploadDock.body.collapse": "Thu Gọn",
+ "uploadDock.body.item.cancel": "Hủy",
+ "uploadDock.body.item.cancelled": "Đã hủy",
  "uploadDock.body.item.done": "Đã tải lên",
  "uploadDock.body.item.error": "Tải lên thất bại, vui lòng thử lại",
  "uploadDock.body.item.pending": "Đang chuẩn bị tải lên...",
@@ -126,6 +129,7 @@
  "uploadDock.body.item.restTime": "Còn lại {{time}}",
  "uploadDock.fileQueueInfo": "Đang tải lên {{count}} tệp đầu tiên, còn lại {{remaining}} trong hàng đợi",
  "uploadDock.totalCount": "Tổng cộng {{count}} mục",
+ "uploadDock.uploadStatus.cancelled": "Tải lên đã bị hủy",
  "uploadDock.uploadStatus.error": "Lỗi tải lên",
  "uploadDock.uploadStatus.pending": "Đang chờ tải lên",
  "uploadDock.uploadStatus.processing": "Đang tải lên",
@@ -335,6 +335,7 @@
  "computer-use-preview.description": "computer-use-preview là mô hình chuyên biệt cho công cụ \"sử dụng máy tính\", được huấn luyện để hiểu và thực hiện các tác vụ liên quan đến máy tính.",
  "dall-e-2.description": "DALL·E thế hệ thứ hai với khả năng tạo hình ảnh thực tế, chính xác hơn và độ phân giải gấp 4 lần thế hệ đầu.",
  "dall-e-3.description": "Mô hình DALL·E mới nhất, phát hành vào tháng 11 năm 2023, hỗ trợ tạo hình ảnh thực tế, chính xác hơn với chi tiết mạnh mẽ hơn.",
+ "databricks/dbrx-instruct.description": "DBRX Instruct cung cấp khả năng xử lý hướng dẫn đáng tin cậy cao trong nhiều ngành công nghiệp.",
  "meta.llama3-8b-instruct-v1:0.description": "Meta Llama 3 là một mô hình ngôn ngữ mở dành cho nhà phát triển, nhà nghiên cứu và doanh nghiệp, được thiết kế để hỗ trợ xây dựng, thử nghiệm và mở rộng các ý tưởng AI sinh ngữ một cách có trách nhiệm. Là một phần trong nền tảng đổi mới cộng đồng toàn cầu, mô hình này phù hợp với môi trường có tài nguyên hạn chế, thiết bị biên và yêu cầu thời gian huấn luyện nhanh hơn.",
  "meta/Llama-3.2-11B-Vision-Instruct.description": "Khả năng suy luận hình ảnh mạnh mẽ trên ảnh độ phân giải cao, phù hợp cho các ứng dụng hiểu thị giác.",
  "meta/Llama-3.2-90B-Vision-Instruct.description": "Khả năng suy luận hình ảnh tiên tiến dành cho các ứng dụng tác tử hiểu thị giác.",
@@ -127,6 +127,10 @@
  "llm.proxyUrl.title": "URL proxy API",
  "llm.waitingForMore": "Sẽ có thêm <1>mô hình được bổ sung</1>, hãy đón chờ",
  "llm.waitingForMoreLinkAriaLabel": "Mở biểu mẫu yêu cầu Nhà cung cấp",
+ "marketPublish.forkConfirm.by": "bởi {{author}}",
+ "marketPublish.forkConfirm.confirm": "Xác nhận Đăng tải",
+ "marketPublish.forkConfirm.description": "Bạn sắp đăng tải một phiên bản phái sinh dựa trên một tác nhân hiện có từ cộng đồng. Tác nhân mới của bạn sẽ được tạo thành một mục riêng biệt trên chợ ứng dụng.",
+ "marketPublish.forkConfirm.title": "Đăng tải Tác nhân Phái sinh",
  "marketPublish.modal.changelog.extra": "Mô tả những thay đổi và cải tiến chính trong phiên bản này",
  "marketPublish.modal.changelog.label": "Nhật ký thay đổi",
  "marketPublish.modal.changelog.maxLengthError": "Nhật ký thay đổi không được vượt quá 500 ký tự",
@@ -524,6 +528,9 @@
  "tools.klavis.servers": "máy chủ",
  "tools.klavis.tools": "công cụ",
  "tools.klavis.verifyAuth": "Tôi đã hoàn tất xác thực",
+ "tools.lobehubSkill.authorize": "Ủy quyền",
+ "tools.lobehubSkill.connect": "Kết nối",
+ "tools.lobehubSkill.error": "Lỗi",
  "tools.notInstalled": "Chưa Cài Đặt",
  "tools.notInstalledWarning": "Kỹ năng này hiện chưa được cài đặt, điều này có thể ảnh hưởng đến chức năng của tác nhân.",
  "tools.plugins.enabled": "Đã bật: {{num}}",
@@ -413,6 +413,52 @@
  "deepseek_r1_distill_llama_70b.description": "DeepSeek-R1-Distill-Llama-70B 是基于 Llama-3.3-70B-Instruct 蒸馏而成。作为 DeepSeek-R1 系列的一部分,使用 DeepSeek-R1 生成的样本进行微调,在数学、编程和推理方面表现出色。",
  "deepseek_r1_distill_qwen_14b.description": "DeepSeek-R1-Distill-Qwen-14B 是基于 Qwen2.5-14B 蒸馏而成,并使用 DeepSeek-R1 生成的 80 万高质量样本进行微调,具备强大的推理能力。",
415
415
  "deepseek_r1_distill_qwen_32b.description": "DeepSeek-R1-Distill-Qwen-32B 是基于 Qwen2.5-32B 蒸馏而成,并使用 DeepSeek-R1 生成的 80 万高质量样本进行微调,在数学、编程和推理方面表现卓越。",
416
+ "devstral-2:123b.description": "Devstral 2 123B 擅长使用工具探索代码库、编辑多个文件,并支持软件工程代理。",
417
+ "doubao-1.5-lite-32k.description": "Doubao-1.5-lite 是一款全新轻量级模型,响应速度极快,兼具卓越质量与低延迟。",
418
+ "doubao-1.5-pro-256k.description": "Doubao-1.5-pro-256k 是 Doubao-1.5-Pro 的全面升级版,整体性能提升 10%。支持 256k 上下文窗口和最多 12k 输出 token,性能更强、窗口更大,适用于更广泛的场景。",
419
+ "doubao-1.5-pro-32k.description": "Doubao-1.5-pro 是新一代旗舰模型,全面升级,在知识、编程和推理方面表现出色。",
420
+ "doubao-1.5-thinking-pro-m.description": "Doubao-1.5 是一款全新的深度推理模型(m 版本原生支持多模态深度推理),在数学、编程、科学推理以及创意写作等通用任务中表现卓越。其在 AIME 2024、Codeforces 和 GPQA 等基准测试中达到或接近顶级水平。支持 128k 上下文窗口和 16k 输出。",
421
+ "doubao-1.5-thinking-pro.description": "Doubao-1.5 是一款全新的深度推理模型,在数学、编程、科学推理以及创意写作等通用任务中表现卓越。其在 AIME 2024、Codeforces 和 GPQA 等基准测试中达到或接近顶级水平。支持 128k 上下文窗口和 16k 输出。",
422
+ "doubao-1.5-thinking-vision-pro.description": "全新视觉深度推理模型,具备更强的多模态理解与推理能力,在 59 个公开基准中有 37 项达到 SOTA 水平。",
423
+ "doubao-1.5-ui-tars.description": "Doubao-1.5-UI-TARS 是一款原生面向图形界面的代理模型,具备类人感知、推理与操作能力,可与界面无缝交互。",
424
+ "doubao-1.5-vision-lite.description": "Doubao-1.5-vision-lite 是升级版多模态模型,支持任意分辨率和极端长宽比图像,提升视觉推理、文档识别、细节理解与指令遵循能力。支持 128k 上下文窗口和最多 16k 输出 token。",
425
+ "doubao-1.5-vision-pro-32k.description": "Doubao-1.5-vision-pro 是升级版多模态模型,支持任意分辨率和极端长宽比图像,提升视觉推理、文档识别、细节理解与指令遵循能力。",
426
+ "doubao-1.5-vision-pro.description": "Doubao-1.5-vision-pro 是升级版多模态模型,支持任意分辨率和极端长宽比图像,提升视觉推理、文档识别、细节理解与指令遵循能力。",
427
+ "doubao-lite-128k.description": "超快响应,性价比更高,适用于多种场景,支持推理与微调,具备 128k 上下文窗口。",
428
+ "doubao-lite-32k.description": "超快响应,性价比更高,适用于多种场景,支持推理与微调,具备 32k 上下文窗口。",
429
+ "doubao-lite-4k.description": "超快响应,性价比更高,适用于多种场景,支持推理与微调,具备 4k 上下文窗口。",
430
+ "doubao-pro-256k.description": "性能最强的旗舰模型,适用于复杂任务,在参考问答、摘要、创作、文本分类和角色扮演等方面表现优异。支持推理与微调,具备 256k 上下文窗口。",
431
+ "doubao-pro-32k.description": "性能最强的旗舰模型,适用于复杂任务,在参考问答、摘要、创作、文本分类和角色扮演等方面表现优异。支持推理与微调,具备 32k 上下文窗口。",
432
+ "doubao-seed-1.6-flash.description": "Doubao-Seed-1.6-flash 是一款超快多模态深度推理模型,TPOT 低至 10ms,支持文本与图像输入,在文本理解上超越前代 lite 模型,在视觉方面媲美主流 pro 模型。支持 256k 上下文窗口和最多 16k 输出 token。",
433
+ "doubao-seed-1.6-lite.description": "Doubao-Seed-1.6-lite 是一款全新多模态深度推理模型,支持可调推理强度(最小、低、中、高),性价比更高,是通用任务的优选,支持最长 256k 上下文窗口。",
434
+ "doubao-seed-1.6-thinking.description": "Doubao-Seed-1.6-thinking 在推理能力上显著增强,相较 Doubao-1.5-thinking-pro 在编程、数学和逻辑推理方面进一步提升,并新增视觉理解能力。支持 256k 上下文窗口和最多 16k 输出 token。",
435
+ "doubao-seed-1.6-vision.description": "Doubao-Seed-1.6-vision 是一款视觉深度推理模型,具备更强的多模态理解与推理能力,适用于教育、图像审核、安检和 AI 搜索问答等场景。支持 256k 上下文窗口和最多 64k 输出 token。",
436
+ "doubao-seed-1.6.description": "Doubao-Seed-1.6 是一款全新多模态深度推理模型,支持自动、思考与非思考模式。在非思考模式下,其性能显著优于 Doubao-1.5-pro/250115。支持 256k 上下文窗口和最多 16k 输出 token。",
437
+ "doubao-seed-1.8.description": "Doubao-Seed-1.8 拥有更强的多模态理解能力与 Agent 能力,支持文本/图像/视频输入与上下文缓存,在复杂任务中表现更加出色。",
438
+ "doubao-seed-code.description": "Doubao-Seed-Code 针对代理式编程深度优化,支持多模态输入(文本/图像/视频)和 256k 上下文窗口,兼容 Anthropic API,适用于编程、视觉理解与代理工作流。",
439
+ "doubao-seededit-3-0-i2i-250628.description": "字节跳动 Seed 推出的 Doubao 图像模型,支持文本与图像输入,具备高度可控的高质量图像生成能力。支持文本引导的图像编辑,输出尺寸长边在 512 至 1536 之间。",
440
+ "doubao-seedream-3-0-t2i-250415.description": "Seedream 3.0 是字节跳动 Seed 推出的图像生成模型,支持文本与图像输入,具备高度可控的高质量图像生成能力。可根据文本提示生成图像。",
441
+ "doubao-seedream-4-0-250828.description": "Seedream 4.0 是字节跳动 Seed 推出的图像生成模型,支持文本与图像输入,具备高度可控的高质量图像生成能力。可根据文本提示生成图像。",
442
+ "doubao-vision-lite-32k.description": "Doubao-vision 是 Doubao 推出的多模态模型,具备强大的图像理解与推理能力,并能精准执行指令。在图文提取与基于图像的推理任务中表现优异,支持更复杂、更广泛的视觉问答场景。",
443
+ "doubao-vision-pro-32k.description": "Doubao-vision 是 Doubao 推出的多模态模型,具备强大的图像理解与推理能力,并能精准执行指令。在图文提取与基于图像的推理任务中表现优异,支持更复杂、更广泛的视觉问答场景。",
444
+ "emohaa.description": "Emohaa 是一款心理健康模型,具备专业咨询能力,帮助用户理解情绪问题。",
445
+ "ernie-4.5-0.3b.description": "ERNIE 4.5 0.3B 是一款开源轻量级模型,适用于本地和定制化部署。",
446
+ "ernie-4.5-21b-a3b.description": "ERNIE 4.5 21B A3B 是一款开源大参数模型,具备更强的理解与生成能力。",
447
+ "ernie-4.5-300b-a47b.description": "ERNIE 4.5 300B A47B 是百度 ERNIE 的超大规模 MoE 模型,推理能力卓越。",
448
+ "ernie-4.5-8k-preview.description": "ERNIE 4.5 8K Preview 是一款用于评估 ERNIE 4.5 的 8K 上下文预览模型。",
449
+ "ernie-4.5-turbo-128k-preview.description": "ERNIE 4.5 Turbo 128K 预览版,具备发布级能力,适用于集成与灰度测试。",
450
+ "ernie-4.5-turbo-128k.description": "ERNIE 4.5 Turbo 128K 是一款高性能通用模型,支持搜索增强与工具调用,适用于问答、编程与代理场景。",
451
+ "ernie-4.5-turbo-32k.description": "ERNIE 4.5 Turbo 32K 是一款中等长度上下文版本,适用于问答、知识库检索与多轮对话。",
452
+ "ernie-4.5-turbo-latest.description": "最新 ERNIE 4.5 Turbo,整体性能优化,适合作为主力生产模型。",
453
+ "ernie-4.5-turbo-vl-32k-preview.description": "ERNIE 4.5 Turbo VL 32K Preview 是一款 32K 多模态预览模型,用于评估长上下文视觉能力。",
454
+ "ernie-4.5-turbo-vl-32k.description": "ERNIE 4.5 Turbo VL 32K 是一款中长上下文多模态模型,适用于长文档与图像联合理解。",
455
+ "ernie-4.5-turbo-vl-latest.description": "ERNIE 4.5 Turbo VL 最新版,图文理解与推理能力进一步提升。",
456
+ "ernie-4.5-turbo-vl-preview.description": "ERNIE 4.5 Turbo VL Preview 是一款多模态预览模型,适用于图文理解与生成,支持视觉问答与内容理解。",
457
+ "ernie-4.5-turbo-vl.description": "ERNIE 4.5 Turbo VL 是一款成熟的多模态模型,适用于生产级图文理解与识别。",
458
+ "ernie-4.5-vl-28b-a3b.description": "ERNIE 4.5 VL 28B A3B 是一款开源多模态模型,支持图文理解与推理。",
459
+ "ernie-5.0-thinking-latest.description": "文心 5.0 Thinking 是一款原生全模态旗舰模型,统一建模文本、图像、音频与视频,在复杂问答、创作与智能体场景中实现全面能力升级。",
460
+ "ernie-5.0-thinking-preview.description": "文心 5.0 Thinking Preview 是一款原生全模态旗舰模型,统一建模文本、图像、音频与视频,在复杂问答、创作与智能体场景中实现全面能力升级。",
461
+ "ernie-char-8k.description": "ERNIE Character 8K 是一款角色对话模型,适用于 IP 角色构建与长期陪伴聊天。",
416
462
  "gemini-flash-latest.description": "Latest release of Gemini Flash",
417
463
  "gemini-flash-lite-latest.description": "Latest release of Gemini Flash-Lite",
418
464
  "gemini-pro-latest.description": "Latest release of Gemini Pro",
@@ -99,6 +99,7 @@
  "ModelSwitchPanel.goToSettings": "前往設定",
  "ModelSwitchPanel.manageProvider": "管理提供者",
  "ModelSwitchPanel.provider": "提供商",
+ "ModelSwitchPanel.searchPlaceholder": "搜尋模型...",
  "ModelSwitchPanel.title": "模型",
  "ModelSwitchPanel.useModelFrom": "使用此模型來自:",
  "MultiImagesUpload.actions.uploadMore": "點擊或拖曳上傳更多",
@@ -37,6 +37,7 @@
  "header.actions.notionGuide.title": "匯入 Notion 內容",
  "header.actions.uploadFile": "上傳檔案",
  "header.actions.uploadFolder": "上傳資料夾",
+ "header.actions.uploadFolder.creatingFolders": "正在建立資料夾結構...",
  "header.newPageButton": "新增文件",
  "header.uploadButton": "上傳",
  "home.getStarted": "開始使用",
@@ -119,6 +120,8 @@
  "title": "資源",
  "toggleLeftPanel": "顯示/隱藏左側面板",
  "uploadDock.body.collapse": "收起",
+ "uploadDock.body.item.cancel": "取消",
+ "uploadDock.body.item.cancelled": "已取消",
  "uploadDock.body.item.done": "已上傳",
  "uploadDock.body.item.error": "上傳失敗,請重試",
  "uploadDock.body.item.pending": "準備上傳...",
@@ -126,6 +129,7 @@
  "uploadDock.body.item.restTime": "剩餘 {{time}}",
  "uploadDock.fileQueueInfo": "正在上傳前 {{count}} 個檔案,剩餘 {{remaining}} 個檔案將排隊上傳",
  "uploadDock.totalCount": "共 {{count}} 項",
+ "uploadDock.uploadStatus.cancelled": "上傳已取消",
  "uploadDock.uploadStatus.error": "上傳出錯",
  "uploadDock.uploadStatus.pending": "等待上傳",
  "uploadDock.uploadStatus.processing": "正在上傳",