@lobehub/chat 1.21.15 → 1.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/Dockerfile +16 -8
- package/Dockerfile.database +20 -9
- package/README.zh-CN.md +8 -6
- package/docs/self-hosting/environment-variables.mdx +71 -0
- package/docs/usage/providers/wenxin.mdx +4 -3
- package/docs/usage/providers/wenxin.zh-CN.mdx +4 -3
- package/locales/ar/error.json +1 -0
- package/locales/ar/modelProvider.json +7 -0
- package/locales/ar/models.json +18 -6
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/error.json +1 -0
- package/locales/bg-BG/modelProvider.json +7 -0
- package/locales/bg-BG/models.json +18 -6
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/error.json +1 -0
- package/locales/de-DE/modelProvider.json +7 -0
- package/locales/de-DE/models.json +18 -6
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/error.json +1 -0
- package/locales/en-US/modelProvider.json +7 -0
- package/locales/en-US/models.json +18 -6
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/error.json +1 -0
- package/locales/es-ES/modelProvider.json +7 -0
- package/locales/es-ES/models.json +18 -6
- package/locales/es-ES/providers.json +3 -0
- package/locales/fr-FR/error.json +1 -0
- package/locales/fr-FR/modelProvider.json +7 -0
- package/locales/fr-FR/models.json +17 -5
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/error.json +1 -0
- package/locales/it-IT/modelProvider.json +7 -0
- package/locales/it-IT/models.json +18 -6
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/error.json +1 -0
- package/locales/ja-JP/modelProvider.json +7 -0
- package/locales/ja-JP/models.json +18 -6
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/error.json +1 -0
- package/locales/ko-KR/modelProvider.json +7 -0
- package/locales/ko-KR/models.json +17 -5
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/error.json +1 -0
- package/locales/nl-NL/modelProvider.json +7 -0
- package/locales/nl-NL/models.json +17 -5
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/error.json +1 -0
- package/locales/pl-PL/modelProvider.json +7 -0
- package/locales/pl-PL/models.json +18 -6
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/error.json +1 -0
- package/locales/pt-BR/modelProvider.json +7 -0
- package/locales/pt-BR/models.json +18 -6
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/error.json +1 -0
- package/locales/ru-RU/modelProvider.json +7 -0
- package/locales/ru-RU/models.json +18 -6
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/error.json +1 -0
- package/locales/tr-TR/modelProvider.json +7 -0
- package/locales/tr-TR/models.json +18 -6
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/error.json +1 -0
- package/locales/vi-VN/modelProvider.json +7 -0
- package/locales/vi-VN/models.json +18 -6
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/error.json +2 -1
- package/locales/zh-CN/modelProvider.json +8 -1
- package/locales/zh-CN/models.json +16 -4
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/error.json +1 -0
- package/locales/zh-TW/modelProvider.json +7 -0
- package/locales/zh-TW/models.json +16 -4
- package/locales/zh-TW/providers.json +3 -0
- package/package.json +5 -3
- package/src/app/(main)/settings/llm/ProviderList/HuggingFace/index.tsx +53 -0
- package/src/app/(main)/settings/llm/ProviderList/providers.tsx +12 -1
- package/src/config/llm.ts +10 -0
- package/src/config/modelProviders/huggingface.ts +50 -0
- package/src/config/modelProviders/index.ts +4 -0
- package/src/const/settings/llm.ts +5 -0
- package/src/features/Conversation/Error/index.tsx +1 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
- package/src/libs/agent-runtime/error.ts +1 -0
- package/src/libs/agent-runtime/groq/index.ts +1 -1
- package/src/libs/agent-runtime/huggingface/index.ts +48 -0
- package/src/libs/agent-runtime/types/type.ts +1 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +58 -20
- package/src/libs/agent-runtime/utils/streams/openai.test.ts +78 -7
- package/src/libs/agent-runtime/utils/streams/openai.ts +38 -5
- package/src/libs/agent-runtime/utils/streams/protocol.ts +63 -4
- package/src/locales/default/error.ts +2 -2
- package/src/locales/default/modelProvider.ts +8 -1
- package/src/server/globalConfig/index.ts +12 -1
- package/src/server/modules/AgentRuntime/index.ts +10 -0
- package/src/services/_url.ts +4 -5
- package/src/types/user/settings/keyVaults.ts +1 -0
- /package/src/app/(backend)/{api → webapi}/chat/[provider]/route.test.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/[provider]/route.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/anthropic/route.test.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/anthropic/route.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/google/route.test.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/google/route.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/minimax/route.test.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/minimax/route.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/models/[provider]/route.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/openai/route.test.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/openai/route.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/wenxin/route.test.ts +0 -0
- /package/src/app/(backend)/{api → webapi}/chat/wenxin/route.ts +0 -0
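The headline change in this release is a new HuggingFace provider: the list above adds `src/config/modelProviders/huggingface.ts`, `src/libs/agent-runtime/huggingface/index.ts`, a settings panel, and extends the shared `openaiCompatibleFactory`. As rough orientation only, a runtime built on that factory typically resembles the sketch below; the export name, base URL, and options shown are assumptions inferred from how other OpenAI-compatible providers in this package are wired, not the actual contents of the new file.

```ts
// Hypothetical sketch — illustrative only, not the shipped implementation of
// src/libs/agent-runtime/huggingface/index.ts.
import { ModelProvider } from '../types/type';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

export const LobeHuggingFaceAI = LobeOpenAICompatibleFactory({
  // Assumed endpoint: HuggingFace's hosted Inference API exposed in an
  // OpenAI-compatible shape; the real runtime may build requests differently.
  baseURL: 'https://api-inference.huggingface.co/v1',
  provider: ModelProvider.HuggingFace, // enum member added in types/type.ts (+1 -0 above)
});
```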
package/locales/en-US/error.json
CHANGED
@@ -67,6 +67,7 @@
  "OllamaBizError": "Error requesting Ollama service, please troubleshoot or retry based on the following information",
  "OllamaServiceUnavailable": "Ollama service is unavailable. Please check if Ollama is running properly or if the cross-origin configuration of Ollama is set correctly.",
  "OpenAIBizError": "Error requesting OpenAI service, please troubleshoot or retry based on the following information",
+ "PermissionDenied": "Sorry, you do not have permission to access this service. Please check if your key has the necessary access rights.",
  "PluginApiNotFound": "Sorry, the API does not exist in the plugin's manifest. Please check if your request method matches the plugin manifest API",
  "PluginApiParamsError": "Sorry, the input parameter validation for the plugin request failed. Please check if the input parameters match the API description",
  "PluginFailToTransformArguments": "Sorry, the plugin failed to parse the arguments. Please try regenerating the assistant message or switch to a more powerful AI model with Tools Calling capability and try again",
package/locales/en-US/modelProvider.json
CHANGED
@@ -58,6 +58,13 @@
      "title": "GitHub PAT"
    }
  },
+ "huggingface": {
+   "accessToken": {
+     "desc": "Enter your HuggingFace Token, click [here](https://huggingface.co/settings/tokens) to create one",
+     "placeholder": "hf_xxxxxxxxx",
+     "title": "HuggingFace Token"
+   }
+ },
  "ollama": {
    "checker": {
      "desc": "Test if the proxy address is correctly filled in",
package/locales/en-US/models.json
CHANGED
@@ -458,6 +458,9 @@
  "google/gemma-2-27b-it": {
    "description": "Gemma 2 continues the design philosophy of being lightweight and efficient."
  },
+ "google/gemma-2-2b-it": {
+   "description": "Google's lightweight instruction-tuning model."
+ },
  "google/gemma-2-9b-it": {
    "description": "Gemma 2 is Google's lightweight open-source text model series."
  },
@@ -589,6 +592,12 @@
  "llama-3.1-sonar-small-128k-online": {
    "description": "Llama 3.1 Sonar Small Online model, featuring 8B parameters, supports a context length of approximately 127,000 tokens, designed for online chat, efficiently handling various text interactions."
  },
+ "llama-3.2-11b-vision-preview": {
+   "description": "Llama 3.2 is designed to handle tasks that combine visual and textual data. It excels in tasks such as image description and visual question answering, bridging the gap between language generation and visual reasoning."
+ },
+ "llama-3.2-90b-vision-preview": {
+   "description": "Llama 3.2 is designed to handle tasks that combine visual and textual data. It excels in tasks such as image description and visual question answering, bridging the gap between language generation and visual reasoning."
+ },
  "llama3-70b-8192": {
    "description": "Meta Llama 3 70B provides unparalleled complexity handling capabilities, tailored for high-demand projects."
  },
@@ -643,6 +652,9 @@
  "meta-llama/Llama-2-13b-chat-hf": {
    "description": "LLaMA-2 Chat (13B) offers excellent language processing capabilities and outstanding interactive experiences."
  },
+ "meta-llama/Llama-2-7b-chat-hf": {
+   "description": "One of the best conversational models."
+ },
  "meta-llama/Llama-3-70b-chat-hf": {
    "description": "LLaMA-3 Chat (70B) is a powerful chat model that supports complex conversational needs."
  },
@@ -811,8 +823,8 @@
  "open-mixtral-8x7b": {
    "description": "Mixtral 8x7B is a sparse expert model that leverages multiple parameters to enhance reasoning speed, suitable for handling multilingual and code generation tasks."
  },
- "openai/gpt-4o
-   "description": "ChatGPT-4o is a dynamic model that updates in real-time to maintain the latest version. It combines powerful language understanding and generation capabilities,
+ "openai/gpt-4o": {
+   "description": "ChatGPT-4o is a dynamic model that updates in real-time to maintain the latest version. It combines powerful language understanding and generation capabilities, suitable for large-scale application scenarios, including customer service, education, and technical support."
  },
  "openai/gpt-4o-mini": {
    "description": "GPT-4o mini is the latest model released by OpenAI following GPT-4 Omni, supporting both text and image input while outputting text. As their most advanced small model, it is significantly cheaper than other recent cutting-edge models and over 60% cheaper than GPT-3.5 Turbo. It maintains state-of-the-art intelligence while offering remarkable cost-effectiveness. GPT-4o mini scored 82% on the MMLU test and currently ranks higher than GPT-4 in chat preferences."
@@ -862,11 +874,11 @@
  "qwen-vl-chat-v1": {
    "description": "Qwen VL supports flexible interaction methods, including multi-image, multi-turn Q&A, and creative capabilities."
  },
- "qwen-vl-max": {
-   "description": "
+ "qwen-vl-max-latest": {
+   "description": "Tongyi Qianwen's ultra-large-scale visual language model. Compared to the enhanced version, it further improves visual reasoning and instruction-following abilities, providing a higher level of visual perception and cognition."
  },
- "qwen-vl-plus": {
-   "description": "
+ "qwen-vl-plus-latest": {
+   "description": "Tongyi Qianwen's large-scale visual language model enhanced version. Significantly improves detail recognition and text recognition capabilities, supporting ultra-high pixel resolution and images of any aspect ratio."
  },
  "qwen-vl-v1": {
    "description": "Initialized with the Qwen-7B language model, this pre-trained model adds an image model with an input resolution of 448."
package/locales/en-US/providers.json
CHANGED
@@ -30,6 +30,9 @@
  "groq": {
    "description": "Groq's LPU inference engine has excelled in the latest independent large language model (LLM) benchmarks, redefining the standards for AI solutions with its remarkable speed and efficiency. Groq represents instant inference speed, demonstrating strong performance in cloud-based deployments."
  },
+ "huggingface": {
+   "description": "The HuggingFace Inference API provides a fast and free way for you to explore thousands of models for various tasks. Whether you are prototyping for a new application or experimenting with the capabilities of machine learning, this API gives you instant access to high-performance models across multiple domains."
+ },
  "hunyuan": {
    "description": "A large language model developed by Tencent, equipped with powerful Chinese creative capabilities, logical reasoning abilities in complex contexts, and reliable task execution skills."
  },
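The en-US strings above reference a HuggingFace access token (placeholder `hf_xxxxxxxxx`) and the hosted Inference API. For readers unfamiliar with that API, a minimal standalone probe looks roughly like this; the model id is only an example taken from the models added in this diff, and this is not how LobeChat itself issues the request.

```ts
// Minimal check of the HuggingFace Inference API with an `hf_...` token.
// Endpoint shape: POST https://api-inference.huggingface.co/models/<model-id>
const HF_TOKEN = process.env.HF_TOKEN ?? 'hf_xxxxxxxxx'; // placeholder, as in the locale strings

async function queryHuggingFace(model: string, inputs: string) {
  const res = await fetch(`https://api-inference.huggingface.co/models/${model}`, {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${HF_TOKEN}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ inputs }),
  });

  if (!res.ok) throw new Error(`HuggingFace request failed with status ${res.status}`);
  return res.json();
}

// Example model id from this release's locale files.
queryHuggingFace('google/gemma-2-2b-it', 'Hello, what can you do?')
  .then((output) => console.log(output))
  .catch((error) => console.error(error));
```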
package/locales/es-ES/error.json
CHANGED
@@ -67,6 +67,7 @@
  "OllamaBizError": "Error al solicitar el servicio de Ollama, por favor verifica la siguiente información o inténtalo de nuevo",
  "OllamaServiceUnavailable": "El servicio Ollama no está disponible. Por favor, verifica si Ollama está funcionando correctamente o si la configuración de Ollama para el acceso entre dominios está configurada correctamente.",
  "OpenAIBizError": "Se produjo un error al solicitar el servicio de OpenAI, por favor, revise la siguiente información o inténtelo de nuevo",
+ "PermissionDenied": "Lo sentimos, no tienes permiso para acceder a este servicio. Por favor, verifica si tu clave tiene los permisos necesarios.",
  "PluginApiNotFound": "Lo sentimos, el API especificado no existe en el manifiesto del complemento. Verifique si su método de solicitud coincide con el API del manifiesto del complemento",
  "PluginApiParamsError": "Lo sentimos, la validación de los parámetros de entrada de la solicitud del complemento no ha pasado. Verifique si los parámetros de entrada coinciden con la información de descripción del API",
  "PluginFailToTransformArguments": "Lo siento, no se pudieron transformar los argumentos de la llamada al plugin. Por favor, intenta generar de nuevo el mensaje del asistente o prueba con un modelo de IA de Tools Calling más potente.",
package/locales/es-ES/modelProvider.json
CHANGED
@@ -58,6 +58,13 @@
      "title": "GitHub PAT"
    }
  },
+ "huggingface": {
+   "accessToken": {
+     "desc": "Introduce tu token de HuggingFace, haz clic [aquí](https://huggingface.co/settings/tokens) para crear uno",
+     "placeholder": "hf_xxxxxxxxx",
+     "title": "Token de HuggingFace"
+   }
+ },
  "ollama": {
    "checker": {
      "desc": "Prueba si la dirección del proxy de la interfaz se ha introducido correctamente",
package/locales/es-ES/models.json
CHANGED
@@ -458,6 +458,9 @@
  "google/gemma-2-27b-it": {
    "description": "Gemma 2 continúa con el concepto de diseño ligero y eficiente."
  },
+ "google/gemma-2-2b-it": {
+   "description": "Modelo de ajuste de instrucciones ligero de Google."
+ },
  "google/gemma-2-9b-it": {
    "description": "Gemma 2 es una serie de modelos de texto de código abierto y ligeros de Google."
  },
@@ -589,6 +592,12 @@
  "llama-3.1-sonar-small-128k-online": {
    "description": "El modelo Llama 3.1 Sonar Small Online, con 8B de parámetros, soporta una longitud de contexto de aproximadamente 127,000 tokens, diseñado para chat en línea, capaz de manejar eficientemente diversas interacciones textuales."
  },
+ "llama-3.2-11b-vision-preview": {
+   "description": "Llama 3.2 está diseñado para manejar tareas que combinan datos visuales y textuales. Destaca en tareas como la descripción de imágenes y preguntas visuales, cruzando la brecha entre la generación de lenguaje y el razonamiento visual."
+ },
+ "llama-3.2-90b-vision-preview": {
+   "description": "Llama 3.2 está diseñado para manejar tareas que combinan datos visuales y textuales. Destaca en tareas como la descripción de imágenes y preguntas visuales, cruzando la brecha entre la generación de lenguaje y el razonamiento visual."
+ },
  "llama3-70b-8192": {
    "description": "Meta Llama 3 70B proporciona una capacidad de procesamiento de complejidad inigualable, diseñado a medida para proyectos de alta demanda."
  },
@@ -643,6 +652,9 @@
  "meta-llama/Llama-2-13b-chat-hf": {
    "description": "LLaMA-2 Chat (13B) ofrece una excelente capacidad de procesamiento de lenguaje y una experiencia de interacción sobresaliente."
  },
+ "meta-llama/Llama-2-7b-chat-hf": {
+   "description": "Uno de los mejores modelos de conversación."
+ },
  "meta-llama/Llama-3-70b-chat-hf": {
    "description": "LLaMA-3 Chat (70B) es un modelo de chat potente, que soporta necesidades de conversación complejas."
  },
@@ -811,8 +823,8 @@
  "open-mixtral-8x7b": {
    "description": "Mixtral 8x7B es un modelo de expertos dispersos que utiliza múltiples parámetros para mejorar la velocidad de razonamiento, adecuado para el procesamiento de tareas de múltiples idiomas y generación de código."
  },
- "openai/gpt-4o
-   "description": "ChatGPT-4o es un modelo dinámico que se actualiza en tiempo real para mantener la versión más actual. Combina una poderosa
+ "openai/gpt-4o": {
+   "description": "ChatGPT-4o es un modelo dinámico que se actualiza en tiempo real para mantener la versión más actual. Combina una poderosa comprensión y generación de lenguaje, adecuado para escenarios de aplicación a gran escala, incluyendo servicio al cliente, educación y soporte técnico."
  },
  "openai/gpt-4o-mini": {
    "description": "GPT-4o mini es el modelo más reciente de OpenAI, lanzado después de GPT-4 Omni, que admite entradas de texto e imagen y genera texto como salida. Como su modelo más avanzado de tamaño pequeño, es mucho más económico que otros modelos de vanguardia recientes y más de un 60% más barato que GPT-3.5 Turbo. Mantiene una inteligencia de vanguardia mientras ofrece una relación calidad-precio notable. GPT-4o mini obtuvo un puntaje del 82% en la prueba MMLU y actualmente se clasifica por encima de GPT-4 en preferencias de chat."
@@ -862,11 +874,11 @@
  "qwen-vl-chat-v1": {
    "description": "Qwen VL admite formas de interacción flexibles, incluyendo múltiples imágenes, preguntas y respuestas en múltiples rondas, y capacidades creativas."
  },
- "qwen-vl-max": {
-   "description": "
+ "qwen-vl-max-latest": {
+   "description": "Modelo de lenguaje visual a ultra gran escala Tongyi Qianwen. En comparación con la versión mejorada, mejora aún más la capacidad de razonamiento visual y de seguimiento de instrucciones, ofreciendo un nivel más alto de percepción y cognición visual."
  },
- "qwen-vl-plus": {
-   "description": "
+ "qwen-vl-plus-latest": {
+   "description": "Versión mejorada del modelo de lenguaje visual a gran escala Tongyi Qianwen. Mejora significativamente la capacidad de reconocimiento de detalles y de texto, soportando imágenes con resolución de más de un millón de píxeles y proporciones de ancho y alto arbitrarias."
  },
  "qwen-vl-v1": {
    "description": "Iniciado con el modelo de lenguaje Qwen-7B, se añade un modelo de imagen, un modelo preentrenado con una resolución de entrada de imagen de 448."
package/locales/es-ES/providers.json
CHANGED
@@ -30,6 +30,9 @@
  "groq": {
    "description": "El motor de inferencia LPU de Groq ha demostrado un rendimiento excepcional en las pruebas de referencia de modelos de lenguaje de gran tamaño (LLM), redefiniendo los estándares de soluciones de IA con su asombrosa velocidad y eficiencia. Groq es un referente en velocidad de inferencia instantánea, mostrando un buen rendimiento en implementaciones basadas en la nube."
  },
+ "huggingface": {
+   "description": "La API de Inferencia de HuggingFace ofrece una forma rápida y gratuita de explorar miles de modelos para diversas tareas. Ya sea que esté prototipando una nueva aplicación o probando las capacidades del aprendizaje automático, esta API le brinda acceso instantáneo a modelos de alto rendimiento en múltiples dominios."
+ },
  "hunyuan": {
    "description": "Un modelo de lenguaje desarrollado por Tencent, que posee una poderosa capacidad de creación en chino, habilidades de razonamiento lógico en contextos complejos y una capacidad confiable para ejecutar tareas."
  },
package/locales/fr-FR/error.json
CHANGED
@@ -67,6 +67,7 @@
  "OllamaBizError": "Erreur commerciale lors de la demande de service Ollama, veuillez vérifier les informations ci-dessous ou réessayer",
  "OllamaServiceUnavailable": "Le service Ollama n'est pas disponible. Veuillez vérifier si Ollama fonctionne correctement ou si la configuration de la communication inter-domaines d'Ollama est correcte.",
  "OpenAIBizError": "Erreur de service OpenAI. Veuillez vérifier les informations suivantes ou réessayer.",
+ "PermissionDenied": "Désolé, vous n'avez pas la permission d'accéder à ce service. Veuillez vérifier si votre clé a les droits d'accès.",
  "PluginApiNotFound": "Désolé, l'API spécifiée n'existe pas dans le manifeste du plugin. Veuillez vérifier que votre méthode de requête correspond à l'API du manifeste du plugin",
  "PluginApiParamsError": "Désolé, la validation des paramètres d'entrée de la requête de ce plugin a échoué. Veuillez vérifier que les paramètres d'entrée correspondent aux informations de l'API",
  "PluginFailToTransformArguments": "Désolé, échec de la transformation des arguments de l'appel du plugin. Veuillez essayer de régénérer le message d'assistance ou de changer de modèle d'IA avec une capacité d'appel d'outils plus puissante, puis réessayer.",
package/locales/fr-FR/modelProvider.json
CHANGED
@@ -58,6 +58,13 @@
      "title": "GitHub PAT"
    }
  },
+ "huggingface": {
+   "accessToken": {
+     "desc": "Entrez votre jeton HuggingFace, cliquez [ici](https://huggingface.co/settings/tokens) pour en créer un",
+     "placeholder": "hf_xxxxxxxxx",
+     "title": "Jeton HuggingFace"
+   }
+ },
  "ollama": {
    "checker": {
      "desc": "Vérifiez si l'adresse du proxy est correctement saisie",
package/locales/fr-FR/models.json
CHANGED
@@ -458,6 +458,9 @@
  "google/gemma-2-27b-it": {
    "description": "Gemma 2 poursuit le concept de conception légère et efficace."
  },
+ "google/gemma-2-2b-it": {
+   "description": "Modèle d'optimisation des instructions léger de Google."
+ },
  "google/gemma-2-9b-it": {
    "description": "Gemma 2 est une série de modèles de texte open source allégés de Google."
  },
@@ -589,6 +592,12 @@
  "llama-3.1-sonar-small-128k-online": {
    "description": "Le modèle Llama 3.1 Sonar Small Online, avec 8B de paramètres, prend en charge une longueur de contexte d'environ 127 000 jetons, conçu pour le chat en ligne, capable de traiter efficacement diverses interactions textuelles."
  },
+ "llama-3.2-11b-vision-preview": {
+   "description": "Llama 3.2 est conçu pour traiter des tâches combinant des données visuelles et textuelles. Il excelle dans des tâches telles que la description d'images et les questions-réponses visuelles, comblant le fossé entre la génération de langage et le raisonnement visuel."
+ },
+ "llama-3.2-90b-vision-preview": {
+   "description": "Llama 3.2 est conçu pour traiter des tâches combinant des données visuelles et textuelles. Il excelle dans des tâches telles que la description d'images et les questions-réponses visuelles, comblant le fossé entre la génération de langage et le raisonnement visuel."
+ },
  "llama3-70b-8192": {
    "description": "Meta Llama 3 70B offre une capacité de traitement de complexité inégalée, sur mesure pour des projets exigeants."
  },
@@ -643,6 +652,9 @@
  "meta-llama/Llama-2-13b-chat-hf": {
    "description": "LLaMA-2 Chat (13B) offre d'excellentes capacités de traitement du langage et une expérience interactive exceptionnelle."
  },
+ "meta-llama/Llama-2-7b-chat-hf": {
+   "description": "L'un des meilleurs modèles de conversation."
+ },
  "meta-llama/Llama-3-70b-chat-hf": {
    "description": "LLaMA-3 Chat (70B) est un modèle de chat puissant, prenant en charge des besoins de dialogue complexes."
  },
@@ -811,7 +823,7 @@
  "open-mixtral-8x7b": {
    "description": "Mixtral 8x7B est un modèle d'expert épars, utilisant plusieurs paramètres pour améliorer la vitesse de raisonnement, adapté au traitement de tâches multilingues et de génération de code."
  },
- "openai/gpt-4o
+ "openai/gpt-4o": {
    "description": "ChatGPT-4o est un modèle dynamique, mis à jour en temps réel pour rester à jour avec la dernière version. Il combine une compréhension et une génération de langage puissantes, adapté à des scénarios d'application à grande échelle, y compris le service client, l'éducation et le support technique."
  },
  "openai/gpt-4o-mini": {
@@ -862,11 +874,11 @@
  "qwen-vl-chat-v1": {
    "description": "Qwen VL prend en charge des modes d'interaction flexibles, y compris la capacité de poser des questions à plusieurs images, des dialogues multi-tours, et plus encore."
  },
- "qwen-vl-max": {
-   "description": "
+ "qwen-vl-max-latest": {
+   "description": "Modèle de langage visuel à très grande échelle Tongyi Qianwen. Par rapport à la version améliorée, il améliore encore les capacités de raisonnement visuel et de suivi des instructions, offrant un niveau de perception visuelle et de cognition plus élevé."
  },
- "qwen-vl-plus": {
-   "description": "
+ "qwen-vl-plus-latest": {
+   "description": "Version améliorée du modèle de langage visuel à grande échelle Tongyi Qianwen. Amélioration significative des capacités de reconnaissance des détails et de reconnaissance de texte, prenant en charge des résolutions d'image de plus d'un million de pixels et des rapports d'aspect de n'importe quelle taille."
  },
  "qwen-vl-v1": {
    "description": "Initialisé avec le modèle de langage Qwen-7B, ajoutant un modèle d'image, un modèle pré-entraîné avec une résolution d'entrée d'image de 448."
package/locales/fr-FR/providers.json
CHANGED
@@ -30,6 +30,9 @@
  "groq": {
    "description": "Le moteur d'inférence LPU de Groq a excellé dans les derniers tests de référence des grands modèles de langage (LLM), redéfinissant les normes des solutions IA grâce à sa vitesse et son efficacité impressionnantes. Groq représente une vitesse d'inférence instantanée, montrant de bonnes performances dans les déploiements basés sur le cloud."
  },
+ "huggingface": {
+   "description": "L'API d'inférence HuggingFace offre un moyen rapide et gratuit d'explorer des milliers de modèles adaptés à diverses tâches. Que vous soyez en train de prototyper une nouvelle application ou d'expérimenter les capacités de l'apprentissage automatique, cette API vous permet d'accéder instantanément à des modèles performants dans de nombreux domaines."
+ },
  "hunyuan": {
    "description": "Un modèle de langage développé par Tencent, doté d'une puissante capacité de création en chinois, d'une capacité de raisonnement logique dans des contextes complexes, ainsi que d'une capacité fiable d'exécution des tâches."
  },
package/locales/it-IT/error.json
CHANGED
@@ -67,6 +67,7 @@
  "OllamaBizError": "Errore di servizio Ollama, controllare le informazioni seguenti o riprovare",
  "OllamaServiceUnavailable": "Servizio Ollama non disponibile: controllare che Ollama sia in esecuzione correttamente o che la configurazione di cross-origin di Ollama sia corretta",
  "OpenAIBizError": "Errore di business di OpenAI. Si prega di controllare le informazioni seguenti o riprovare.",
+ "PermissionDenied": "Ci dispiace, non hai il permesso di accedere a questo servizio. Controlla se la tua chiave ha i diritti di accesso.",
  "PluginApiNotFound": "Spiacenti, l'API specificata non esiste nel manifesto del plugin. Verifica che il metodo di richiesta corrisponda all'API del manifesto del plugin",
  "PluginApiParamsError": "Spiacenti, la convalida dei parametri di input della richiesta del plugin non è riuscita. Verifica che i parametri di input corrispondano alle informazioni dell'API",
  "PluginFailToTransformArguments": "Spiacenti, la trasformazione degli argomenti della chiamata al plugin non è riuscita. Si prega di provare a rigenerare il messaggio dell'assistente o riprovare dopo aver cambiato il modello AI di Tools Calling con capacità più avanzate.",
package/locales/it-IT/modelProvider.json
CHANGED
@@ -58,6 +58,13 @@
      "title": "GitHub PAT"
    }
  },
+ "huggingface": {
+   "accessToken": {
+     "desc": "Inserisci il tuo token HuggingFace, clicca [qui](https://huggingface.co/settings/tokens) per crearne uno",
+     "placeholder": "hf_xxxxxxxxx",
+     "title": "Token HuggingFace"
+   }
+ },
  "ollama": {
    "checker": {
      "desc": "Verifica se l'indirizzo del proxy è stato compilato correttamente",
package/locales/it-IT/models.json
CHANGED
@@ -458,6 +458,9 @@
  "google/gemma-2-27b-it": {
    "description": "Gemma 2 continua il concetto di design leggero ed efficiente."
  },
+ "google/gemma-2-2b-it": {
+   "description": "Modello di ottimizzazione delle istruzioni leggero di Google"
+ },
  "google/gemma-2-9b-it": {
    "description": "Gemma 2 è una serie di modelli di testo open source leggeri di Google."
  },
@@ -589,6 +592,12 @@
  "llama-3.1-sonar-small-128k-online": {
    "description": "Il modello Llama 3.1 Sonar Small Online, con 8B parametri, supporta una lunghezza di contesto di circa 127.000 token, progettato per chat online, in grado di gestire interazioni testuali in modo efficiente."
  },
+ "llama-3.2-11b-vision-preview": {
+   "description": "Llama 3.2 è progettato per gestire compiti che combinano dati visivi e testuali. Eccelle in compiti come la descrizione delle immagini e le domande visive, colmando il divario tra generazione del linguaggio e ragionamento visivo."
+ },
+ "llama-3.2-90b-vision-preview": {
+   "description": "Llama 3.2 è progettato per gestire compiti che combinano dati visivi e testuali. Eccelle in compiti come la descrizione delle immagini e le domande visive, colmando il divario tra generazione del linguaggio e ragionamento visivo."
+ },
  "llama3-70b-8192": {
    "description": "Meta Llama 3 70B offre capacità di elaborazione della complessità senza pari, progettato su misura per progetti ad alta richiesta."
  },
@@ -643,6 +652,9 @@
  "meta-llama/Llama-2-13b-chat-hf": {
    "description": "LLaMA-2 Chat (13B) offre eccellenti capacità di elaborazione linguistica e un'interazione di alta qualità."
  },
+ "meta-llama/Llama-2-7b-chat-hf": {
+   "description": "Uno dei migliori modelli di conversazione"
+ },
  "meta-llama/Llama-3-70b-chat-hf": {
    "description": "LLaMA-3 Chat (70B) è un potente modello di chat, in grado di gestire esigenze di dialogo complesse."
  },
@@ -811,8 +823,8 @@
  "open-mixtral-8x7b": {
    "description": "Mixtral 8x7B è un modello di esperti sparsi, che utilizza più parametri per aumentare la velocità di ragionamento, adatto per gestire compiti di generazione di linguaggio e codice multilingue."
  },
- "openai/gpt-4o
-   "description": "ChatGPT-4o è un modello dinamico, aggiornato in tempo reale per mantenere la versione più recente. Combina potenti capacità di comprensione e generazione del linguaggio, adatto a scenari
+ "openai/gpt-4o": {
+   "description": "ChatGPT-4o è un modello dinamico, aggiornato in tempo reale per mantenere la versione più recente. Combina potenti capacità di comprensione e generazione del linguaggio, adatto a scenari di applicazione su larga scala, tra cui assistenza clienti, istruzione e supporto tecnico."
  },
  "openai/gpt-4o-mini": {
    "description": "GPT-4o mini è il modello più recente di OpenAI, lanciato dopo GPT-4 Omni, che supporta input visivi e testuali e produce output testuali. Come il loro modello di piccole dimensioni più avanzato, è molto più economico rispetto ad altri modelli all'avanguardia recenti e costa oltre il 60% in meno rispetto a GPT-3.5 Turbo. Mantiene un'intelligenza all'avanguardia, offrendo un notevole rapporto qualità-prezzo. GPT-4o mini ha ottenuto un punteggio dell'82% nel test MMLU e attualmente è classificato più in alto di GPT-4 per preferenze di chat."
@@ -862,11 +874,11 @@
  "qwen-vl-chat-v1": {
    "description": "Qwen VL supporta modalità di interazione flessibili, inclusi modelli di domande e risposte multipli e creativi."
  },
- "qwen-vl-max": {
-   "description": "
+ "qwen-vl-max-latest": {
+   "description": "Modello di linguaggio visivo Qwen di grande scala. Rispetto alla versione potenziata, migliora ulteriormente la capacità di ragionamento visivo e di aderenza alle istruzioni, offrendo un livello superiore di percezione visiva e cognizione."
  },
- "qwen-vl-plus": {
-   "description": "
+ "qwen-vl-plus-latest": {
+   "description": "Versione potenziata del modello di linguaggio visivo Qwen. Migliora notevolmente la capacità di riconoscimento dei dettagli e di riconoscimento del testo, supportando risoluzioni superiori a un milione di pixel e immagini di qualsiasi rapporto di aspetto."
  },
  "qwen-vl-v1": {
    "description": "Inizializzato con il modello di linguaggio Qwen-7B, aggiunge un modello di immagine, con una risoluzione di input dell'immagine di 448."
package/locales/it-IT/providers.json
CHANGED
@@ -30,6 +30,9 @@
  "groq": {
    "description": "Il motore di inferenza LPU di Groq ha mostrato prestazioni eccezionali nei recenti benchmark indipendenti sui modelli di linguaggio di grandi dimensioni (LLM), ridefinendo gli standard delle soluzioni AI con la sua incredibile velocità ed efficienza. Groq rappresenta una velocità di inferenza istantanea, mostrando buone prestazioni nelle implementazioni basate su cloud."
  },
+ "huggingface": {
+   "description": "L'API di Inferenza di HuggingFace offre un modo rapido e gratuito per esplorare migliaia di modelli per una varietà di compiti. Che tu stia prototipando una nuova applicazione o cercando di sperimentare le funzionalità del machine learning, questa API ti consente di accedere immediatamente a modelli ad alte prestazioni in diversi ambiti."
+ },
  "hunyuan": {
    "description": "Un modello di linguaggio sviluppato da Tencent, dotato di potenti capacità di creazione in cinese, abilità di ragionamento logico in contesti complessi e capacità affidabili di esecuzione dei compiti."
  },
package/locales/ja-JP/error.json
CHANGED
@@ -67,6 +67,7 @@
  "OllamaBizError": "Ollamaใตใผใในใฎใชใฏใจในใใงใจใฉใผใ็บ็ใใพใใใไปฅไธใฎๆๅ ฑใซๅบใฅใใฆใใฉใใซใทใฅใผใใฃใณใฐใ่กใใใๅๅบฆใ่ฉฆใใใ ใใ",
  "OllamaServiceUnavailable": "Ollamaใตใผใในใๅฉ็จใงใใพใใใOllamaใๆญฃๅธธใซๅไฝใใฆใใใใใพใใฏOllamaใฎใฏใญในใชใชใธใณ่จญๅฎใๆญฃใใ่กใใใฆใใใใ็ขบ่ชใใฆใใ ใใ",
  "OpenAIBizError": "ใชใฏใจในใ OpenAI ใตใผใในใงใจใฉใผใ็บ็ใใพใใใไปฅไธใฎๆๅ ฑใ็ขบ่ชใใฆๅ่ฉฆ่กใใฆใใ ใใใ",
+ "PermissionDenied": "็ณใ่จณใใใพใใใใใใฎใตใผใในใซใขใฏใปในใใๆจฉ้ใใใใพใใใใใชใใฎใญใผใซใขใฏใปในๆจฉใใใใใฉใใใ็ขบ่ชใใฆใใ ใใใ",
  "PluginApiNotFound": "็ณใ่จณใใใพใใใใใใฉใฐใคใณใฎใใใใงในใใซๆๅฎใใใAPIใ่ฆใคใใใพใใใงใใใใชใฏใจในใใกใฝใใใจใใฉใฐใคใณใฎใใใใงในใใฎAPIใไธ่ดใใฆใใใใฉใใใ็ขบ่ชใใฆใใ ใใ",
  "PluginApiParamsError": "็ณใ่จณใใใพใใใใใใฉใฐใคใณใฎใชใฏใจในใใใฉใกใผใฟใฎๆค่จผใซๅคฑๆใใพใใใใใฉใกใผใฟใจAPIใฎ่ชฌๆใไธ่ดใใฆใใใใฉใใ็ขบ่ชใใฆใใ ใใ",
  "PluginFailToTransformArguments": "็ณใ่จณใใใพใใใใใใฉใฐใคใณใฎๅผๆฐๅคๆใซๅคฑๆใใพใใใๅฉๆใกใใปใผใธใๅ็ๆใใใใใใๅผทๅใช Tools Calling ๆฉ่ฝใๆใคAIใขใใซใซๅใๆฟใใฆๅ่ฉฆ่กใใฆใใ ใใ",
package/locales/ja-JP/modelProvider.json
CHANGED
@@ -58,6 +58,13 @@
      "title": "GitHub PAT"
    }
  },
+ "huggingface": {
+   "accessToken": {
+     "desc": "ใใชใใฎ HuggingFace ใใผใฏใณใๅฅๅใใฆใใ ใใใ [ใใกใ](https://huggingface.co/settings/tokens) ใใฏใชใใฏใใฆไฝๆใใพใใ",
+     "placeholder": "hf_xxxxxxxxx",
+     "title": "HuggingFace ใใผใฏใณ"
+   }
+ },
  "ollama": {
    "checker": {
      "desc": "ใใญใญใทใขใใฌในใๆญฃใใๅฅๅใใใฆใใใใใในใใใพใ",
package/locales/ja-JP/models.json
CHANGED
@@ -458,6 +458,9 @@
  "google/gemma-2-27b-it": {
    "description": "Gemma 2ใฏใ่ปฝ้ๅใจ้ซๅน็ใฎใใถใคใณ็ๅฟตใ็ถๆฟใใฆใใพใใ"
  },
+ "google/gemma-2-2b-it": {
+   "description": "Googleใฎ่ปฝ้ๆ็คบ่ชฟๆดใขใใซ"
+ },
  "google/gemma-2-9b-it": {
    "description": "Gemma 2ใฏใGoogleใฎ่ปฝ้ใชใผใใณใฝใผในใใญในใใขใใซใทใชใผใบใงใใ"
  },
@@ -589,6 +592,12 @@
  "llama-3.1-sonar-small-128k-online": {
    "description": "Llama 3.1 Sonar Small Onlineใขใใซใฏใ8Bใใฉใกใผใฟใๆใกใ็ด127,000ใใผใฏใณใฎใณใณใใญในใ้ทใใตใใผใใใใชใณใฉใคใณใใฃใใ็จใซ่จญ่จใใใฆใใใใใพใใพใชใใญในใใคใณใฟใฉใฏใทใงใณใๅน็็ใซๅฆ็ใงใใพใใ"
  },
+ "llama-3.2-11b-vision-preview": {
+   "description": "Llama 3.2ใฏใ่ฆ่ฆใใผใฟใจใใญในใใใผใฟใ็ตใฟๅใใใใฟในใฏใๅฆ็ใใใใจใ็ฎ็ใจใใฆใใพใใ็ปๅใฎ่ชฌๆใ่ฆ่ฆ็่ณชๅๅฟ็ญใชใฉใฎใฟในใฏใงๅชใใใใใฉใผใใณในใ็บๆฎใใ่จ่ช็ๆใจ่ฆ่ฆๆจ่ซใฎ้ใฎใฎใฃใใใๅใใพใใ"
+ },
+ "llama-3.2-90b-vision-preview": {
+   "description": "Llama 3.2ใฏใ่ฆ่ฆใใผใฟใจใใญในใใใผใฟใ็ตใฟๅใใใใฟในใฏใๅฆ็ใใใใจใ็ฎ็ใจใใฆใใพใใ็ปๅใฎ่ชฌๆใ่ฆ่ฆ็่ณชๅๅฟ็ญใชใฉใฎใฟในใฏใงๅชใใใใใฉใผใใณในใ็บๆฎใใ่จ่ช็ๆใจ่ฆ่ฆๆจ่ซใฎ้ใฎใฎใฃใใใๅใใพใใ"
+ },
  "llama3-70b-8192": {
    "description": "Meta Llama 3 70Bใฏใๆฏ้กใฎใชใ่ค้ๆงๅฆ็่ฝๅใๆไพใใ้ซ่ฆๆฑใใญใธใงใฏใใซ็นๅใใฆใใพใใ"
  },
@@ -643,6 +652,9 @@
  "meta-llama/Llama-2-13b-chat-hf": {
    "description": "LLaMA-2 Chat (13B)ใฏใๅชใใ่จ่ชๅฆ็่ฝๅใจ็ด ๆดใใใใคใณใฟใฉใฏใทใงใณไฝ้จใๆไพใใพใใ"
  },
+ "meta-llama/Llama-2-7b-chat-hf": {
+   "description": "ๆ้ซใฎๅฏพ่ฉฑใขใใซใฎไธใค"
+ },
  "meta-llama/Llama-3-70b-chat-hf": {
    "description": "LLaMA-3 Chat (70B)ใฏใๅผทๅใชใใฃใใใขใใซใงใใใ่ค้ใชๅฏพ่ฉฑใใผใบใใตใใผใใใพใใ"
  },
@@ -811,8 +823,8 @@
  "open-mixtral-8x7b": {
    "description": "Mixtral 8x7Bใฏใในใใผในใจใญในใใผใใขใใซใงใใใ่คๆฐใฎใใฉใกใผใฟใๅฉ็จใใฆๆจ่ซ้ๅบฆใๅไธใใใๅค่จ่ชใใใณใณใผใ็ๆใฟในใฏใฎๅฆ็ใซ้ฉใใฆใใพใใ"
  },
- "openai/gpt-4o
-   "description": "ChatGPT-4o
+ "openai/gpt-4o": {
+   "description": "ChatGPT-4oใฏๅ็ใขใใซใงใๆๆฐใฎใใผใธใงใณใ็ถญๆใใใใใซใชใขใซใฟใคใ ใงๆดๆฐใใใพใใๅผทๅใช่จ่ช็่งฃใจ็ๆ่ฝๅใ็ตใฟๅใใใฆใใใ้กงๅฎขใตใผใในใๆ่ฒใๆ่กใตใใผใใชใฉใฎๅคง่ฆๆจกใชใขใใชใฑใผใทใงใณใทใใชใชใซ้ฉใใฆใใพใใ"
  },
  "openai/gpt-4o-mini": {
    "description": "GPT-4o miniใฏOpenAIใGPT-4 Omniใฎๅพใซ็บ่กจใใๆๆฐใขใใซใงใ็ปๅใจใใญในใใฎๅฅๅใใตใใผใใใใใญในใใๅบๅใใพใใๅฝผใใฎๆๅ็ซฏใฎๅฐๅใขใใซใจใใฆใๆ่ฟใฎไปใฎๆๅ็ทใขใใซใใใใฏใใใซๅฎไพกใงใGPT-3.5 Turboใใใ60%ไปฅไธๅฎไพกใงใใๆๅ็ซฏใฎ็ฅ่ฝใ็ถญๆใใคใคใ้ก่ใชใณในใใใใฉใผใใณในใ่ชใใพใใGPT-4o miniใฏMMLUใในใใง82%ใฎในใณใขใ็ฒๅพใใ็พๅจใใฃใใใฎๅฅฝใฟใงGPT-4ใใใ้ซใ่ฉไพกใๅพใฆใใพใใ"
@@ -862,11 +874,11 @@
  "qwen-vl-chat-v1": {
    "description": "้็พฉๅๅVLใฏใ่คๆฐใฎ็ปๅใๅคๆฎต้ใฎ่ณชๅๅฟ็ญใๅตไฝใชใฉใฎๆ่ปใชใคใณใฟใฉใฏใทใงใณๆนๅผใใตใใผใใใใขใใซใงใใ"
  },
- "qwen-vl-max": {
-   "description": "
+ "qwen-vl-max-latest": {
+   "description": "้็พฉๅๅใฎ่ถๅคง่ฆๆจก่ฆ่ฆ่จ่ชใขใใซใๅผทๅ็ใซๆฏในใฆใ่ฆ่ฆๆจ่ซ่ฝๅใจๆ็คบ้ตๅฎ่ฝๅใใใใซๅไธใใใใใ้ซใ่ฆ่ฆ่ช่ญใจ่ช็ฅใฌใใซใๆไพใใพใใ"
  },
- "qwen-vl-plus": {
-   "description": "
+ "qwen-vl-plus-latest": {
+   "description": "้็พฉๅๅใฎๅคง่ฆๆจก่ฆ่ฆ่จ่ชใขใใซใฎๅผทๅ็ใ่ฉณ็ดฐ่ช่ญ่ฝๅใจๆๅญ่ช่ญ่ฝๅใๅคงๅนใซๅไธใใใ100ไธใใฏใปใซไปฅไธใฎ่งฃๅๅบฆใจไปปๆใฎใขในใใฏใๆฏใฎ็ปๅใใตใใผใใใพใใ"
  },
  "qwen-vl-v1": {
    "description": "Qwen-7B่จ่ชใขใใซใๅๆๅใใ็ปๅใขใใซใ่ฟฝๅ ใใใ็ปๅๅฅๅ่งฃๅๅบฆ448ใฎไบๅใใฌใผใใณใฐใขใใซใงใใ"
package/locales/ja-JP/providers.json
CHANGED
@@ -30,6 +30,9 @@
  "groq": {
    "description": "GroqใฎLPUๆจ่ซใจใณใธใณใฏใๆๆฐใฎ็ฌ็ซใใๅคง่ฆๆจก่จ่ชใขใใซ๏ผLLM๏ผใใณใใใผใฏใในใใงๅ่ถใใใใใฉใผใใณในใ็คบใใใใฎ้ฉ็ฐ็ใช้ๅบฆใจๅน็ใงAIใฝใชใฅใผใทใงใณใฎๅบๆบใๅๅฎ็พฉใใฆใใพใใGroqใฏใๅณๆๆจ่ซ้ๅบฆใฎไปฃ่กจใงใใใใฏใฉใฆใใใผในใฎๅฑ้ใง่ฏๅฅฝใชใใใฉใผใใณในใ็บๆฎใใฆใใพใใ"
  },
+ "huggingface": {
+   "description": "HuggingFace Inference APIใฏใๆฐๅใฎใขใใซใใใพใใพใชใฟในใฏใซๅฏพใใฆๆข็ดขใใใใใฎ่ฟ้ใใค็กๆใฎๆนๆณใๆไพใใพใใๆฐใใใขใใชใฑใผใทใงใณใฎใใญใใฟใคใใไฝๆใใฆใใๅ ดๅใงใใๆฉๆขฐๅญฆ็ฟใฎๆฉ่ฝใ่ฉฆใใฆใใๅ ดๅใงใใใใฎAPIใฏ่คๆฐใฎๅ้ใฎ้ซๆง่ฝใขใใซใซๅณๅบงใซใขใฏใปในใงใใใใใซใใพใใ"
+ },
  "hunyuan": {
    "description": "ใใณใปใณใใ้็บใใๅคง่ฆๆจก่จ่ชใขใใซใงใใใๅผทๅใชไธญๅฝ่ชใฎๅตไฝ่ฝๅใ่ค้ใชๆ่ใซใใใ่ซ็็ๆจ่ซ่ฝๅใใใใฆไฟก้ ผๆงใฎ้ซใใฟในใฏๅฎ่ก่ฝๅใๅใใฆใใพใใ"
  },
package/locales/ko-KR/error.json
CHANGED
@@ -67,6 +67,7 @@
  "OllamaBizError": "Ollama ์๋น์ค ์์ฒญ ์ค ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค. ์๋ ์ ๋ณด๋ฅผ ํ์ธํ๊ณ ๋ค์ ์๋ํ์ญ์์ค.",
  "OllamaServiceUnavailable": "Ollama ์๋น์ค๋ฅผ ์ฌ์ฉํ ์ ์์ต๋๋ค. Ollama๊ฐ ์ฌ๋ฐ๋ฅด๊ฒ ์๋ํ๋์ง ๋๋ Ollama์ ๊ต์ฐจ ๋๋ฉ์ธ ๊ตฌ์ฑ์ด ์ฌ๋ฐ๋ฅด๊ฒ ์ค์ ๋์๋์ง ํ์ธํ์ญ์์ค.",
  "OpenAIBizError": "OpenAI ์๋น์ค ์์ฒญ ์ค ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค. ์๋ ์ ๋ณด๋ฅผ ํ์ธํ๊ณ ๋ค์ ์๋ํด์ฃผ์ธ์.",
+ "PermissionDenied": "์ฃ์กํฉ๋๋ค. ์ด ์๋น์ค์ ์ ๊ทผํ ๊ถํ์ด ์์ต๋๋ค. ํค์ ์ ๊ทผ ๊ถํ์ด ์๋์ง ํ์ธํด ์ฃผ์ธ์.",
  "PluginApiNotFound": "์ฃ์กํฉ๋๋ค. ํ๋ฌ๊ทธ์ธ ์ค๋ช์์ ํด๋น API๊ฐ ์์ต๋๋ค. ์์ฒญ ๋ฉ์๋์ ํ๋ฌ๊ทธ์ธ ์ค๋ช์ API๊ฐ ์ผ์นํ๋์ง ํ์ธํด์ฃผ์ธ์.",
  "PluginApiParamsError": "์ฃ์กํฉ๋๋ค. ํ๋ฌ๊ทธ์ธ ์์ฒญ์ ์๋ ฅ ๋งค๊ฐ๋ณ์ ์ ํจ์ฑ ๊ฒ์ฌ์ ์คํจํ์ต๋๋ค. ์๋ ฅ ๋งค๊ฐ๋ณ์์ API ์ค๋ช์ ๋ณด๊ฐ ์ผ์นํ๋์ง ํ์ธํด์ฃผ์ธ์.",
  "PluginFailToTransformArguments": "์ฃ์กํฉ๋๋ค. ํ๋ฌ๊ทธ์ธ ํธ์ถ ์ธ์ ๋ณํ์ ์คํจํ์ต๋๋ค. ๋์ฐ๋ฏธ ๋ฉ์์ง๋ฅผ ๋ค์ ์์ฑํ๊ฑฐ๋ ๋ ๊ฐ๋ ฅํ AI ๋ชจ๋ธ๋ก Tools Calling ๋ฅ๋ ฅ์ ๋ณ๊ฒฝํ ํ ๋ค์ ์๋ํด์ฃผ์ธ์.",
package/locales/ko-KR/modelProvider.json
CHANGED
@@ -58,6 +58,13 @@
      "title": "GitHub PAT"
    }
  },
+ "huggingface": {
+   "accessToken": {
+     "desc": "๋น์ ์ HuggingFace ํ ํฐ์ ์๋ ฅํ์ธ์. [์ฌ๊ธฐ](https://huggingface.co/settings/tokens)๋ฅผ ํด๋ฆญํ์ฌ ์์ฑํ์ธ์.",
+     "placeholder": "hf_xxxxxxxxx",
+     "title": "HuggingFace ํ ํฐ"
+   }
+ },
  "ollama": {
    "checker": {
      "desc": "ํ๋ก์ ์ฃผ์๊ฐ ์ฌ๋ฐ๋ฅด๊ฒ ์๋ ฅ๋์๋์ง ํ์คํธํฉ๋๋ค",
package/locales/ko-KR/models.json
CHANGED
@@ -458,6 +458,9 @@
  "google/gemma-2-27b-it": {
    "description": "Gemma 2๋ ๊ฒฝ๋ํ์ ํจ์จ์ ์ธ ์ค๊ณ๋ฅผ ์ด์ด๊ฐ๋๋ค."
  },
+ "google/gemma-2-2b-it": {
+   "description": "Google์ ๊ฒฝ๋ ์ง์ ์กฐ์ ๋ชจ๋ธ"
+ },
  "google/gemma-2-9b-it": {
    "description": "Gemma 2๋ Google์ ๊ฒฝ๋ํ๋ ์คํ ์์ค ํ์คํธ ๋ชจ๋ธ ์๋ฆฌ์ฆ์๋๋ค."
  },
@@ -589,6 +592,12 @@
  "llama-3.1-sonar-small-128k-online": {
    "description": "Llama 3.1 Sonar Small Online ๋ชจ๋ธ์ 8B ๋งค๊ฐ๋ณ์๋ฅผ ๊ฐ์ถ๊ณ ์์ผ๋ฉฐ, ์ฝ 127,000๊ฐ์ ํ ํฐ์ ์ปจํ์คํธ ๊ธธ์ด๋ฅผ ์ง์ํ์ฌ ์จ๋ผ์ธ ์ฑํ์ ์ํด ์ค๊ณ๋์์ต๋๋ค."
  },
+ "llama-3.2-11b-vision-preview": {
+   "description": "Llama 3.2๋ ์๊ฐ ๋ฐ ํ์คํธ ๋ฐ์ดํฐ๋ฅผ ๊ฒฐํฉํ ์์์ ์ฒ๋ฆฌํ๊ธฐ ์ํด ์ค๊ณ๋์์ต๋๋ค. ์ด๋ฏธ์ง ์ค๋ช๋ฐ ์๊ฐ์ ์ง๋ฌธ ์๋ต๊ณผ ๊ฐ์ ์์์์ ๋ฐ์ด๋ ์ฑ๋ฅ์ ๋ณด์ด๋ฉฐ, ์ธ์ด ์์ฑ๊ณผ ์๊ฐ์ ์ถ๋ก ๊ฐ์ ๊ฐ๊ทน์ ๋์ต๋๋ค."
+ },
+ "llama-3.2-90b-vision-preview": {
+   "description": "Llama 3.2๋ ์๊ฐ ๋ฐ ํ์คํธ ๋ฐ์ดํฐ๋ฅผ ๊ฒฐํฉํ ์์์ ์ฒ๋ฆฌํ๊ธฐ ์ํด ์ค๊ณ๋์์ต๋๋ค. ์ด๋ฏธ์ง ์ค๋ช๋ฐ ์๊ฐ์ ์ง๋ฌธ ์๋ต๊ณผ ๊ฐ์ ์์์์ ๋ฐ์ด๋ ์ฑ๋ฅ์ ๋ณด์ด๋ฉฐ, ์ธ์ด ์์ฑ๊ณผ ์๊ฐ์ ์ถ๋ก ๊ฐ์ ๊ฐ๊ทน์ ๋์ต๋๋ค."
+ },
  "llama3-70b-8192": {
    "description": "Meta Llama 3 70B๋ ๋นํ ๋ฐ ์๋ ๋ณต์ก์ฑ ์ฒ๋ฆฌ ๋ฅ๋ ฅ์ ์ ๊ณตํ๋ฉฐ, ๋์ ์๊ตฌ ์ฌํญ์ ๊ฐ์ง ํ๋ก์ ํธ์ ๋ง์ถคํ์ผ๋ก ์ค๊ณ๋์์ต๋๋ค."
  },
@@ -643,6 +652,9 @@
  "meta-llama/Llama-2-13b-chat-hf": {
    "description": "LLaMA-2 Chat (13B)๋ ๋ฐ์ด๋ ์ธ์ด ์ฒ๋ฆฌ ๋ฅ๋ ฅ๊ณผ ์ฐ์ํ ์ํธ์์ฉ ๊ฒฝํ์ ์ ๊ณตํฉ๋๋ค."
  },
+ "meta-llama/Llama-2-7b-chat-hf": {
+   "description": "์ต๊ณ ์ ๋ํ ๋ชจ๋ธ ์ค ํ๋"
+ },
  "meta-llama/Llama-3-70b-chat-hf": {
    "description": "LLaMA-3 Chat (70B)๋ ๊ฐ๋ ฅํ ์ฑํ๋ชจ๋ธ๋ก, ๋ณต์กํ ๋ํ ์๊ตฌ๋ฅผ ์ง์ํฉ๋๋ค."
  },
@@ -811,7 +823,7 @@
  "open-mixtral-8x7b": {
    "description": "Mixtral 8x7B๋ ํฌ์ ์ ๋ฌธ๊ฐ ๋ชจ๋ธ๋ก, ์ฌ๋ฌ ๋งค๊ฐ๋ณ์๋ฅผ ํ์ฉํ์ฌ ์ถ๋ก ์๋๋ฅผ ๋์ด๋ฉฐ, ๋ค๊ตญ์ด ๋ฐ ์ฝ๋ ์์ฑ ์์์ฒ๋ฆฌ์ ์ ํฉํฉ๋๋ค."
  },
- "openai/gpt-4o
+ "openai/gpt-4o": {
    "description": "ChatGPT-4o๋ ๋์ ๋ชจ๋ธ๋ก, ์ต์ ๋ฒ์ ์ ์ ์งํ๊ธฐ ์ํด ์ค์๊ฐ์ผ๋ก ์๋ฐ์ดํธ๋ฉ๋๋ค. ๊ฐ๋ ฅํ ์ธ์ด ์ดํด ๋ฐ ์์ฑ ๋ฅ๋ ฅ์ ๊ฒฐํฉํ์ฌ ๊ณ ๊ฐ ์๋น์ค, ๊ต์ก ๋ฐ ๊ธฐ์ ์ง์์ ํฌํจํ ๋๊ท๋ชจ ์์ฉ ํ๋ก๊ทธ๋จ์ ์ ํฉํฉ๋๋ค."
  },
  "openai/gpt-4o-mini": {
@@ -862,11 +874,11 @@
  "qwen-vl-chat-v1": {
    "description": "ํต์์ฒ๋ฌธ VL์ ๋ค์ค ์ด๋ฏธ์ง, ๋ค์ค ํ์ฐจ ์ง๋ฌธ ์๋ต, ์ฐฝ์ ๋ฑ ์ ์ฐํ ์ํธ์์ฉ ๋ฐฉ์์ ์ง์ํ๋ ๋ชจ๋ธ์๋๋ค."
  },
- "qwen-vl-max": {
-   "description": "ํต์์ฒ๋ฌธ ์ด๋๊ท๋ชจ
+ "qwen-vl-max-latest": {
+   "description": "ํต์์ฒ๋ฌธ ์ด๋๊ท๋ชจ ๋น์ฃผ์ผ ์ธ์ด ๋ชจ๋ธ. ๊ฐํํ์ ๋นํด ์๊ฐ์ ์ถ๋ก ๋ฅ๋ ฅ๊ณผ ์ง์ ์ค์ ๋ฅ๋ ฅ์ ๋ค์ ํ ๋ฒ ํฅ์์์ผ, ๋ ๋์ ์๊ฐ์ ์ธ์๊ณผ ์ธ์ง ์์ค์ ์ ๊ณตํฉ๋๋ค."
  },
- "qwen-vl-plus": {
-   "description": "ํต์์ฒ๋ฌธ ๋๊ท๋ชจ
+ "qwen-vl-plus-latest": {
+   "description": "ํต์์ฒ๋ฌธ ๋๊ท๋ชจ ๋น์ฃผ์ผ ์ธ์ด ๋ชจ๋ธ ๊ฐํํ. ์ธ๋ถ ์ฌํญ ์ธ์ ๋ฅ๋ ฅ๊ณผ ๋ฌธ์ ์ธ์ ๋ฅ๋ ฅ์ ํฌ๊ฒ ํฅ์์์ผฐ์ผ๋ฉฐ, ๋ฐฑ๋ง ํ์ ์ด์์ ํด์๋์ ์์์ ๊ฐ๋ก ์ธ๋ก ๋น์จ์ ์ด๋ฏธ์ง๋ฅผ ์ง์ํฉ๋๋ค."
  },
  "qwen-vl-v1": {
    "description": "Qwen-7B ์ธ์ด ๋ชจ๋ธ๋ก ์ด๊ธฐํ๋ ๋ชจ๋ธ๋ก, ์ด๋ฏธ์ง ๋ชจ๋ธ์ ์ถ๊ฐํ์ฌ ์ด๋ฏธ์ง ์๋ ฅ ํด์๋๊ฐ 448์ธ ์ฌ์ ํ๋ จ ๋ชจ๋ธ์๋๋ค."
package/locales/ko-KR/providers.json
CHANGED
@@ -30,6 +30,9 @@
  "groq": {
    "description": "Groq์ LPU ์ถ๋ก ์์ง์ ์ต์ ๋๋ฆฝ ๋ํ ์ธ์ด ๋ชจ๋ธ(LLM) ๋ฒค์น๋งํฌ ํ์คํธ์์ ๋ฐ์ด๋ ์ฑ๋ฅ์ ๋ณด์ด๋ฉฐ, ๋๋ผ์ด ์๋์ ํจ์จ์ฑ์ผ๋ก AI ์๋ฃจ์์ ๊ธฐ์ค์ ์ฌ์ ์ํ๊ณ ์์ต๋๋ค. Groq๋ ์ฆ๊ฐ์ ์ธ ์ถ๋ก ์๋์ ๋ํ์ฃผ์๋ก, ํด๋ผ์ฐ๋ ๊ธฐ๋ฐ ๋ฐฐํฌ์์ ์ฐ์ํ ์ฑ๋ฅ์ ๋ณด์ฌ์ค๋๋ค."
  },
+ "huggingface": {
+   "description": "HuggingFace Inference API๋ ์์ฒ ๊ฐ์ ๋ชจ๋ธ์ ํ์ํ ์ ์๋ ๋น ๋ฅด๊ณ ๋ฌด๋ฃ์ ๋ฐฉ๋ฒ์ ์ ๊ณตํฉ๋๋ค. ์๋ก์ด ์ ํ๋ฆฌ์ผ์ด์์ ํ๋กํ ํ์ํ๊ฑฐ๋ ๋จธ์ ๋ฌ๋์ ๊ธฐ๋ฅ์ ์๋ํ๋ ๊ฒฝ์ฐ, ์ด API๋ ์ฌ๋ฌ ๋ถ์ผ์ ๊ณ ์ฑ๋ฅ ๋ชจ๋ธ์ ์ฆ์ ์ ๊ทผํ ์ ์๊ฒ ํด์ค๋๋ค."
+ },
  "hunyuan": {
    "description": "ํ์ผํธ๊ฐ ๊ฐ๋ฐํ ๋ํ ์ธ์ด ๋ชจ๋ธ๋ก, ๊ฐ๋ ฅํ ํ๊ตญ์ด ์ฐฝ์ ๋ฅ๋ ฅ๊ณผ ๋ณต์กํ ๋งฅ๋ฝ์์์ ๋ผ๋ฆฌ์ ์ถ๋ก ๋ฅ๋ ฅ, ๊ทธ๋ฆฌ๊ณ ์ ๋ขฐํ ์ ์๋ ์์์ํ ๋ฅ๋ ฅ์ ๊ฐ์ถ๊ณ ์์ต๋๋ค."
  },
package/locales/nl-NL/error.json
CHANGED
@@ -67,6 +67,7 @@
  "OllamaBizError": "Fout bij het aanroepen van de Ollama-service, controleer de onderstaande informatie en probeer opnieuw",
  "OllamaServiceUnavailable": "Ollama-service niet beschikbaar. Controleer of Ollama correct werkt en of de cross-origin configuratie van Ollama juist is ingesteld.",
  "OpenAIBizError": "Er is een fout opgetreden bij het aanvragen van de OpenAI-service. Controleer de volgende informatie of probeer het opnieuw.",
+ "PermissionDenied": "Het spijt ons, je hebt geen toestemming om deze service te gebruiken. Controleer of je sleutel de juiste toegangsrechten heeft.",
  "PluginApiNotFound": "Sorry, de API van de plug-inbeschrijvingslijst bestaat niet. Controleer of uw verzoeksmethode overeenkomt met de plug-inbeschrijvingslijst API",
  "PluginApiParamsError": "Sorry, de validatie van de invoerparameters van de plug-in is mislukt. Controleer of de invoerparameters overeenkomen met de API-beschrijving",
  "PluginFailToTransformArguments": "Sorry, the plugin failed to parse the arguments. Please try regenerating the assistant message or retry with a more powerful AI model with Tools Calling capability.",
|