pygpt-net 2.6.23__py3-none-any.whl → 2.6.24__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/chat/response.py +6 -5
- pygpt_net/core/attachments/context.py +4 -4
- pygpt_net/core/idx/chat.py +1 -1
- pygpt_net/core/idx/indexing.py +3 -3
- pygpt_net/core/idx/llm.py +61 -2
- pygpt_net/data/config/config.json +21 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/settings.json +18 -0
- pygpt_net/data/locale/locale.de.ini +46 -0
- pygpt_net/data/locale/locale.en.ini +49 -1
- pygpt_net/data/locale/locale.es.ini +46 -0
- pygpt_net/data/locale/locale.fr.ini +46 -0
- pygpt_net/data/locale/locale.it.ini +46 -0
- pygpt_net/data/locale/locale.pl.ini +47 -1
- pygpt_net/data/locale/locale.uk.ini +46 -0
- pygpt_net/data/locale/locale.zh.ini +46 -0
- pygpt_net/provider/agents/llama_index/codeact_workflow.py +8 -7
- pygpt_net/provider/agents/llama_index/planner_workflow.py +11 -10
- pygpt_net/provider/agents/llama_index/supervisor_workflow.py +9 -8
- pygpt_net/provider/agents/openai/agent_b2b.py +30 -17
- pygpt_net/provider/agents/openai/agent_planner.py +29 -29
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +21 -23
- pygpt_net/provider/agents/openai/agent_with_feedback.py +21 -23
- pygpt_net/provider/agents/openai/bot_researcher.py +25 -30
- pygpt_net/provider/agents/openai/evolve.py +37 -39
- pygpt_net/provider/agents/openai/supervisor.py +16 -18
- pygpt_net/provider/core/config/patch.py +8 -0
- pygpt_net/provider/llms/anthropic.py +5 -4
- pygpt_net/provider/llms/google.py +2 -2
- pygpt_net/ui/widget/textarea/input.py +3 -3
- {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.24.dist-info}/METADATA +11 -2
- {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.24.dist-info}/RECORD +37 -37
- {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.24.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.24.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.24.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,11 @@
+2.6.24 (2025-08-26)
+
+- Added a new option: LlamaIndex -> Embeddings -> Default embedding providers for attachments.
+- The same model provider is now used for both embedding and RAG query in attachment indexing.
+- Translations have been added to Agents.
+- Fixed fetching Anthropic models list.
+- Added Google GenAI Embeddings.
+
 2.6.23 (2025-08-25)
 
 - Added an inline "Add a new chat" button to the right of the tabs.
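
The headline change in 2.6.24 is that attachment indexing now picks an embedding model that matches the provider of the model used for the RAG query, falling back to the global embeddings provider when no per-provider default is configured. A minimal, self-contained Python sketch of that selection idea follows; the names (DEFAULT_EMBEDDINGS, pick_embedding) are illustrative only and are not part of the pygpt-net API.

# Illustrative sketch only -- mirrors the "llama.idx.embeddings.default" config added in 2.6.24.
DEFAULT_EMBEDDINGS = [
    {"provider": "google", "model": "gemini-embedding-001"},
    {"provider": "openai", "model": "text-embedding-3-small"},
    {"provider": "azure_openai", "model": "text-embedding-3-small"},
    {"provider": "ollama", "model": ""},  # empty model -> fall back to the chat model id
]

def pick_embedding(chat_provider, chat_model_id, global_default):
    """Return (provider, model) used to embed attachments for a given chat model."""
    for entry in DEFAULT_EMBEDDINGS:
        if entry["provider"] == chat_provider:
            return chat_provider, entry["model"] or chat_model_id
    return "default", global_default  # no per-provider entry -> keep the global embeddings provider

print(pick_embedding("google", "gemini-2.5-flash", "text-embedding-3-small"))  # ('google', 'gemini-embedding-001')
print(pick_embedding("ollama", "llama3.1", "text-embedding-3-small"))          # ('ollama', 'llama3.1')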
pygpt_net/__init__.py
CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.26 00:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.6.
-__build__ = "2025-08-
+__version__ = "2.6.24"
+__build__ = "2025-08-26"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/chat/response.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.26 01:00:00 #
 # ================================================== #
 
 from typing import Dict, Any
@@ -264,15 +264,16 @@ class Response:
         if global_mode not in self.AGENT_MODES_ALLOWED:
             return  # no agent mode, nothing to do
 
+        # agent evaluation finish
+        if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_eval_finish" in ctx.extra):
+            controller.agent.llama.on_end(ctx)
+            return
+
         # not agent final response
         if ctx.extra is None or (isinstance(ctx.extra, dict) and "agent_finish" not in ctx.extra):
             self.window.update_status(trans("status.agent.reasoning"))
             controller.chat.common.lock_input()  # lock input, re-enable stop button
 
-        if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_eval_finish" in ctx.extra):
-            controller.agent.llama.on_end(ctx)
-            return
-
         # agent final response
         if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_finish" in ctx.extra):
             controller.agent.llama.on_finish(ctx)  # evaluate response and continue if needed
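
The Response change above reorders the agent-mode checks so that a finished evaluation ("agent_eval_finish") is handled before the "not finished yet" branch, which previously re-locked the input first. Below is a small, self-contained sketch of the resulting dispatch order; the function name and return strings are illustrative, not the app's API.

from typing import Optional

def dispatch(extra: Optional[dict]) -> str:
    """Sketch of the 2.6.24 ordering of agent-mode checks in Response."""
    if extra is not None and isinstance(extra, dict) and "agent_eval_finish" in extra:
        return "on_end"          # evaluation finished -> end the agent run immediately
    if extra is None or "agent_finish" not in extra:
        return "keep_reasoning"  # intermediate step -> lock input and keep reasoning
    return "on_finish"           # final response -> evaluate and continue if needed

print(dispatch({"agent_eval_finish": True}))  # on_end (in 2.6.23 this check ran after the input lock)
print(dispatch({"agent_finish": True}))       # on_finish
print(dispatch(None))                         # keep_reasoning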
@@ -6,7 +6,7 @@
|
|
|
6
6
|
# GitHub: https://github.com/szczyglis-dev/py-gpt #
|
|
7
7
|
# MIT License #
|
|
8
8
|
# Created By : Marcin Szczygliński #
|
|
9
|
-
# Updated Date: 2025.
|
|
9
|
+
# Updated Date: 2025.08.26 01:00:00 #
|
|
10
10
|
# ================================================== #
|
|
11
11
|
|
|
12
12
|
import copy
|
|
@@ -496,12 +496,12 @@ class Context:
|
|
|
496
496
|
:param documents: list of documents (optional)
|
|
497
497
|
:return: list of doc IDs
|
|
498
498
|
"""
|
|
499
|
-
model =
|
|
499
|
+
model, model_item = self.get_selected_model("query")
|
|
500
500
|
doc_ids = []
|
|
501
501
|
if type == AttachmentItem.TYPE_FILE:
|
|
502
|
-
doc_ids = self.window.core.idx.indexing.index_attachment(source, idx_path,
|
|
502
|
+
doc_ids = self.window.core.idx.indexing.index_attachment(source, idx_path, model_item, documents)
|
|
503
503
|
elif type == AttachmentItem.TYPE_URL:
|
|
504
|
-
doc_ids = self.window.core.idx.indexing.index_attachment_web(source, idx_path,
|
|
504
|
+
doc_ids = self.window.core.idx.indexing.index_attachment_web(source, idx_path, model_item, documents)
|
|
505
505
|
if self.is_verbose():
|
|
506
506
|
print("Attachments: indexed. Doc IDs: {}".format(doc_ids))
|
|
507
507
|
return doc_ids
|
pygpt_net/core/idx/chat.py
CHANGED
@@ -656,7 +656,7 @@ class Chat:
         """
         if model is None:
             model = self.window.core.models.from_defaults()
-        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False, auto_embed=True)
         index = self.storage.get_ctx_idx(path, llm, embed_model)
 
         # 1. try to retrieve directly from index
pygpt_net/core/idx/indexing.py
CHANGED
@@ -1070,7 +1070,7 @@ class Indexing:
         if model is None:
             model = self.window.core.models.from_defaults()
 
-        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False, auto_embed=True)
         index = self.window.core.idx.storage.get_ctx_idx(
             index_path,
             llm=llm,
@@ -1078,7 +1078,7 @@
         )  # get or create ctx index
 
         idx = f"tmp:{index_path}"  # tmp index id
-        self.window.core.idx.log(f"Indexing to context attachment index: {idx}...")
+        self.window.core.idx.log(f"Indexing to context attachment index: {idx}... using model: {model.id}")
 
         doc_ids = []
         if documents is None:
@@ -1112,7 +1112,7 @@
         if model is None:
             model = self.window.core.models.from_defaults()
 
-        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False, auto_embed=True)
         index = self.window.core.idx.storage.get_ctx_idx(index_path, llm, embed_model)  # get or create ctx index
 
         idx = f"tmp:{index_path}"  # tmp index id
pygpt_net/core/idx/llm.py
CHANGED
@@ -19,7 +19,7 @@ from llama_index.llms.openai import OpenAI
 
 from pygpt_net.core.types import (
     MODE_LLAMA_INDEX,
-    MODEL_DEFAULT_MINI,
+    MODEL_DEFAULT_MINI, MODE_CHAT,
 )
 from pygpt_net.item.model import ModelItem
 
@@ -128,14 +128,73 @@ class Llm:
             self,
             model: Optional[ModelItem] = None,
             stream: bool = False,
+            auto_embed: bool = False,
     ):
         """
         Get service context + embeddings provider
 
         :param model: Model item (for query)
         :param stream: Stream mode (True to enable streaming)
+        :param auto_embed: Auto-detect embeddings provider based on model capabilities
         :return: Service context instance
         """
         llm = self.get(model=model, stream=stream)
-
+        if not auto_embed:
+            embed_model = self.get_embeddings_provider()
+        else:
+            embed_model = self.get_custom_embed_provider(model=model)
         return llm, embed_model
+
+
+    def get_custom_embed_provider(self, model: Optional[ModelItem] = None) -> Optional[BaseEmbedding]:
+        """
+        Get custom embeddings provider based on model
+
+        :param model: Model item
+        :return: Embeddings provider instance or None
+        """
+        # base_embedding_provider = self.window.core.config.get("llama.idx.embeddings.provider", self.default_embed)
+        # if base_embedding_provider == model.provider:
+        #     return self.get_embeddings_provider()
+
+        embed_model = None
+        args = []
+
+        # try to get custom args from config for the model provider
+        is_custom_provider = False
+        default = self.window.core.config.get("llama.idx.embeddings.default", [])
+        for item in default:
+            provider = item.get("provider", "")
+            if provider and provider == model.provider:
+                is_custom_provider = True
+                m = ModelItem()
+                m.provider = model.provider
+                client_args = self.window.core.models.prepare_client_args(MODE_CHAT, m)
+                model_name = item.get("model", "")
+                if not model_name:
+                    model_name = model.id  # fallback to model id if not set in config (Ollama, etc)
+                args = [
+                    {
+                        "name": "model_name",
+                        "type": "str",
+                        "value": model_name,
+                    }
+                ]
+                if model.provider != "ollama":
+                    args.append(
+                        {
+                            "name": "api_key",
+                            "type": "str",
+                            "value": client_args.get("api_key", ""),
+                        }
+                    )
+                break
+
+        if is_custom_provider:
+            embed_model = self.window.core.llm.llms[model.provider].get_embeddings_model(
+                window=self.window,
+                config=args,
+            )
+        if not embed_model:
+            embed_model = self.get_embeddings_provider()
+        return embed_model
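
The new get_custom_embed_provider() above translates a matching "llama.idx.embeddings.default" entry into the name/type/value argument list handed to the provider's get_embeddings_model(). Here is a self-contained sketch of just that translation step, assuming the argument-list format shown in the diff; the helper name build_embed_args is hypothetical.

def build_embed_args(defaults, provider, model_id, api_key):
    """Build the kwargs-style config list for the matching provider, or [] if none matches."""
    for item in defaults:
        if item.get("provider", "") == provider:
            model_name = item.get("model", "") or model_id  # empty model -> fall back to the query model id
            args = [{"name": "model_name", "type": "str", "value": model_name}]
            if provider != "ollama":  # local Ollama does not need an API key
                args.append({"name": "api_key", "type": "str", "value": api_key})
            return args
    return []  # caller falls back to the global embeddings provider

defaults = [
    {"provider": "openai", "model": "text-embedding-3-small"},
    {"provider": "ollama", "model": ""},
]
print(build_embed_args(defaults, "ollama", "llama3.1", ""))
# [{'name': 'model_name', 'type': 'str', 'value': 'llama3.1'}]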
pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.
-        "app.version": "2.6.
-        "updated_at": "2025-08-
+        "version": "2.6.24",
+        "app.version": "2.6.24",
+        "updated_at": "2025-08-26T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -253,6 +253,24 @@
             "value": "{api_endpoint}"
         }
     ],
+    "llama.idx.embeddings.default": [
+        {
+            "provider": "google",
+            "model": "gemini-embedding-001"
+        },
+        {
+            "provider": "openai",
+            "model": "text-embedding-3-small"
+        },
+        {
+            "provider": "azure_openai",
+            "model": "text-embedding-3-small"
+        },
+        {
+            "provider": "ollama",
+            "model": ""
+        }
+    ],
     "llama.idx.embeddings.limit.rpm": 100,
     "llama.idx.excluded.ext": "3g2,3gp,7z,a,aac,aiff,alac,apk,apk,apng,app,ar,avif,bin,cab,class,deb,deb,dll,dmg,dmg,drv,dsd,dylib,dylib,ear,egg,elf,esd,exe,flac,flv,heic,heif,ico,img,iso,jar,ko,lib,lz,lz4,m2v,mpc,msi,nrg,o,ogg,ogv,pcm,pkg,pkg,psd,pyc,rar,rpm,rpm,so,so,svg,swm,sys,vdi,vhd,vhdx,vmdk,vob,war,whl,wim,wma,wmv,xz,zst",
     "llama.idx.excluded.force": false,
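
To inspect or tweak the new defaults without the UI, the list can be read straight from the user's config.json. A hedged example follows; the path assumes the default Linux working directory (~/.config/pygpt-net), which may differ on other platforms or with a custom workdir.

import json
from pathlib import Path

cfg_path = Path.home() / ".config" / "pygpt-net" / "config.json"  # assumed default workdir location
cfg = json.loads(cfg_path.read_text(encoding="utf-8"))

for entry in cfg.get("llama.idx.embeddings.default", []):
    model = entry.get("model") or "(use the query model id)"
    print(f"{entry.get('provider')}: {model}")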
pygpt_net/data/config/models.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.
-        "app.version": "2.6.
-        "updated_at": "2025-08-
+        "version": "2.6.24",
+        "app.version": "2.6.24",
+        "updated_at": "2025-08-26T23:07:35"
     },
     "items": {
         "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
pygpt_net/data/config/settings.json
CHANGED
@@ -1697,6 +1697,24 @@
         "advanced": false,
         "tab": "embeddings"
     },
+    "llama.idx.embeddings.default": {
+        "section": "llama-index",
+        "type": "dict",
+        "keys": {
+            "provider": {
+                "type": "combo",
+                "use": "embeddings_providers"
+            },
+            "model": {
+                "type": "str"
+            }
+        },
+        "label": "settings.llama.idx.embeddings.default",
+        "description": "settings.llama.idx.embeddings.default.desc",
+        "value": [],
+        "advanced": false,
+        "tab": "embeddings"
+    },
     "llama.idx.recursive": {
         "section": "llama-index",
         "type": "bool",
pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -72,7 +72,53 @@ action.use.read_cmd = Bitten, diese Datei zu lesen...
 action.video.open = Video oder Audio öffnen...
 action.video.play = Video oder Audio abspielen...
 action.video.transcribe = Ton transkribieren...
+agent.coder.additional.label = Zusätzlicher Prompt
+agent.coder.additional.prompt.desc = Zusätzlicher Prompt für Agent (wird zum Basis-Prompt hinzugefügt)
+agent.coder.base.label = Basis-Prompt
+agent.coder.base.prompt.desc = Code ausführen prompt (initial)
+agent.eval.feedback = Feedback
+agent.eval.next = Neu ausführen mit Feedback
+agent.eval.score = Bewertungsnote
+agent.eval.score.good = Antwort ist gut genug, beenden.
+agent.evolve.generation = Generation
+agent.evolve.maxgen_limit = Maximale Generationen erreicht, beenden.
+agent.evolve.option.max_generations = Maximale Generationen
+agent.evolve.option.num_parents = Anzahl der Eltern
+agent.evolve.running = Laufender Agent
+agent.evolve.winner = Gewinner: Agent
 agent.infinity.confirm.content = WARNUNG: Sie versuchen, eine unendliche Schleife zu starten! Dies kann zu einem hohen Tokenverbrauch führen. Sind Sie sicher, dass Sie fortfahren möchten?
+agent.name.supervisor = Supervisor
+agent.name.worker = Arbeiter
+agent.option.model = Modell
+agent.option.name = Name
+agent.option.prompt = Prompt
+agent.option.prompt.b1.desc = Prompt für Bot 1
+agent.option.prompt.b2.desc = Prompt für Bot 2
+agent.option.prompt.base.desc = Prompt für Basis-Agent
+agent.option.prompt.chooser.desc = Prompt für Wähler-Agent
+agent.option.prompt.feedback.desc = Prompt zur Feedback-Evaluierung
+agent.option.prompt.planner.desc = Prompt für Planer-Agent
+agent.option.prompt.search.desc = Prompt für Such-Agent
+agent.option.prompt.supervisor.desc = Prompt für Supervisor
+agent.option.prompt.worker.desc = Prompt für Arbeiter
+agent.option.section.base = Basisagent
+agent.option.section.chooser = Wähler
+agent.option.section.feedback = Feedback
+agent.option.section.planner = Planer
+agent.option.section.search = Suche
+agent.option.section.supervisor = Supervisor
+agent.option.section.worker = Arbeiter
+agent.option.section.writer = Schreiber
+agent.option.tools.local = Lokale Werkzeuge zulassen
+agent.option.tools.local.desc = Nutzung lokaler Werkzeuge für diesen Agenten zulassen
+agent.option.tools.remote = Entfernte Werkzeuge zulassen
+agent.option.tools.remote.desc = Nutzung entfernter Werkzeuge für diesen Agenten zulassen
+agent.planner.plan.label = Planer (initial)
+agent.planner.plan.prompt.desc = Initialer Plan-Prompt
+agent.planner.refine.label = Planer (verfeinern)
+agent.planner.refine.prompt.desc = Plan verfeinern prompt
+agent.planner.step.label = Prompt ausführen
+agent.planner.step.prompt.desc = Schritte ausführen prompt
 alert.preset.empty_id = Name ist erforderlich.
 alert.preset.no_chat_completion = Mindestens eine der Optionen: Chat, Vervollständigung, Bild oder Vision wird benötigt!
 alert.snap.file_manager = Snap erkannt. Bitte das Verzeichnis manuell in Ihrem Dateimanager öffnen:
pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -72,7 +72,53 @@ action.use.read_cmd = Ask for reading this file...
 action.video.open = Open video or audio...
 action.video.play = Play video or audio...
 action.video.transcribe = Transcribe audio...
+agent.coder.additional.label = Additional prompt
+agent.coder.additional.prompt.desc = Additional prompt for agent (will be added to the base prompt)
+agent.coder.base.label = Base prompt
+agent.coder.base.prompt.desc = Code execute prompt (initial)
+agent.eval.feedback = Feedback
+agent.eval.next = Re-running with feedback
+agent.eval.score = Evaluator score
+agent.eval.score.good = Response is good enough, exiting.
+agent.evolve.generation = Generation
+agent.evolve.maxgen_limit = Max generations reached, exiting.
+agent.evolve.option.max_generations = Max generations
+agent.evolve.option.num_parents = Num of parents
+agent.evolve.running = Running agent
+agent.evolve.winner = Winner: agent
 agent.infinity.confirm.content = WARNING: You are attempting to run an infinite loop! This may cause heavy token usage. Are you sure you want to continue?
+agent.name.supervisor = Supervisor
+agent.name.worker = Worker
+agent.option.model = Model
+agent.option.name = Name
+agent.option.prompt = Prompt
+agent.option.prompt.b1.desc = Prompt for bot 1
+agent.option.prompt.b2.desc = Prompt for bot 2
+agent.option.prompt.base.desc = Prompt for Base Agent
+agent.option.prompt.chooser.desc = Prompt for Chooser agent
+agent.option.prompt.feedback.desc = Prompt for feedback evaluation
+agent.option.prompt.planner.desc = Prompt for Planner agent
+agent.option.prompt.search.desc = Prompt for search agent
+agent.option.prompt.supervisor.desc = Prompt for Supervisor
+agent.option.prompt.worker.desc = Prompt for Worker
+agent.option.section.base = Base agent
+agent.option.section.chooser = Chooser
+agent.option.section.feedback = Feedback
+agent.option.section.planner = Planner
+agent.option.section.search = Search
+agent.option.section.supervisor = Supervisor
+agent.option.section.worker = Worker
+agent.option.section.writer = Writer
+agent.option.tools.local = Allow local tools
+agent.option.tools.local.desc = Allow usage of local tools for this agent
+agent.option.tools.remote = Allowe remote tools
+agent.option.tools.remote.desc = Allow usage of remote tools for this agent
+agent.planner.plan.label = Planner (initial)
+agent.planner.plan.prompt.desc = Initial plan prompt
+agent.planner.refine.label = Planner (refine)
+agent.planner.refine.prompt.desc = Plan refine prompt
+agent.planner.step.label = Execute prompt
+agent.planner.step.prompt.desc = Steps execute prompt
 alert.preset.empty_id = Name is required.
 alert.preset.no_chat_completion = At least one of: chat, completion, img or vision option is required!
 alert.snap.file_manager = Snap detected. Please open the directory manually in your file manager:
@@ -1200,8 +1246,10 @@ settings.llama.idx.custom_meta = Custom metadata to append/replace to indexed do
 settings.llama.idx.custom_meta.desc = Define custom metadata key => value fields for specified file extensions, separate extensions by comma.\nAllowed placeholders: {path}, {relative_path} {filename}, {dirname}, {relative_dir} {ext}, {size}, {mtime}, {date}, {date_time}, {time}, {timestamp}
 settings.llama.idx.custom_meta.web = Custom metadata to append/replace to indexed documents (web/external content)
 settings.llama.idx.custom_meta.web.desc = Define custom metadata key => value fields for specified external data loaders.\nAllowed placeholders: {date}, {date_time}, {time}, {timestamp} + {data loader args}
-settings.llama.idx.embeddings.args =
+settings.llama.idx.embeddings.args = Global embeddings provider **kwargs
 settings.llama.idx.embeddings.args.desc = Additional keyword arguments (**kwargs), such as model name, for the embeddings provider instance. These arguments will be passed to the provider instance; please refer to the LlamaIndex API reference for a list of required arguments for the specified embeddings provider.
+settings.llama.idx.embeddings.default = Default embedding providers for attachments
+settings.llama.idx.embeddings.default.desc = Define embedding model by provider to use in attachments
 settings.llama.idx.embeddings.env = Embeddings provider ENV vars
 settings.llama.idx.embeddings.env.desc = Environment to set up before embedding provider initialization, such as API keys, etc. Use {config_key} as a placeholder to use the value from the application configuration.
 settings.llama.idx.embeddings.limit.rpm = RPM limit
pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -72,7 +72,53 @@ action.use.read_cmd = Pedir leer este archivo...
 action.video.open = Abrir vídeo o audio...
 action.video.play = Reproducir vídeo o audio...
 action.video.transcribe = Transcribir audio...
+agent.coder.additional.label = Prompt adicional
+agent.coder.additional.prompt.desc = Prompt adicional para el agente (se añadirá al prompt base)
+agent.coder.base.label = Prompt base
+agent.coder.base.prompt.desc = Ejecutar código prompt (inicial)
+agent.eval.feedback = Feedback
+agent.eval.next = Reejecutando con feedback
+agent.eval.score = Puntuación del evaluador
+agent.eval.score.good = Respuesta suficientemente buena, saliendo.
+agent.evolve.generation = Generación
+agent.evolve.maxgen_limit = Máxima generación alcanzada, saliendo.
+agent.evolve.option.max_generations = Máximas generaciones
+agent.evolve.option.num_parents = Número de padres
+agent.evolve.running = Agente en ejecución
+agent.evolve.winner = Ganador: agente
 agent.infinity.confirm.content = ADVERTENCIA: ¡Estás intentando ejecutar un bucle infinito! Esto puede causar un uso intensivo de tokens. ¿Estás seguro de que deseas continuar?
+agent.name.supervisor = Supervisor
+agent.name.worker = Trabajador
+agent.option.model = Modelo
+agent.option.name = Nombre
+agent.option.prompt = Prompt
+agent.option.prompt.b1.desc = Prompt para bot 1
+agent.option.prompt.b2.desc = Prompt para bot 2
+agent.option.prompt.base.desc = Prompt para Agente Base
+agent.option.prompt.chooser.desc = Prompt para agente selector
+agent.option.prompt.feedback.desc = Prompt para evaluación de feedback
+agent.option.prompt.planner.desc = Prompt para agente planificador
+agent.option.prompt.search.desc = Prompt para agente de búsqueda
+agent.option.prompt.supervisor.desc = Prompt para Supervisor
+agent.option.prompt.worker.desc = Prompt para Trabajador
+agent.option.section.base = Agente base
+agent.option.section.chooser = Selector
+agent.option.section.feedback = Feedback
+agent.option.section.planner = Planificador
+agent.option.section.search = Búsqueda
+agent.option.section.supervisor = Supervisor
+agent.option.section.worker = Trabajador
+agent.option.section.writer = Escritor
+agent.option.tools.local = Permitir herramientas locales
+agent.option.tools.local.desc = Permitir uso de herramientas locales para este agente
+agent.option.tools.remote = Permitir herramientas remotas
+agent.option.tools.remote.desc = Permitir uso de herramientas remotas para este agente
+agent.planner.plan.label = Planificador (inicial)
+agent.planner.plan.prompt.desc = Prompt del plan inicial
+agent.planner.refine.label = Planificador (refinamiento)
+agent.planner.refine.prompt.desc = Prompt de refinamiento del plan
+agent.planner.step.label = Ejecutar prompt
+agent.planner.step.prompt.desc = Pasos ejecutar prompt
 alert.preset.empty_id = Se requiere un nombre.
 alert.preset.no_chat_completion = Se requiere al menos una de las siguientes: chat, finalización, img o visión.
 alert.snap.file_manager = Se detectó un snapshot. Por favor, abra el directorio manualmente en su gestor de archivos:
pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -72,7 +72,53 @@ action.use.read_cmd = Demander à lire ce fichier...
 action.video.open = Ouvrir une vidéo ou un audio...
 action.video.play = Lire une vidéo ou un audio...
 action.video.transcribe = Transcrire l'audio...
+agent.coder.additional.label = Prompt supplémentaire
+agent.coder.additional.prompt.desc = Prompt supplémentaire pour l'agent (sera ajouté au prompt de base)
+agent.coder.base.label = Prompt de base
+agent.coder.base.prompt.desc = Exécuter le code prompt (initial)
+agent.eval.feedback = Feedback
+agent.eval.next = Réexécution avec feedback
+agent.eval.score = Score de l'évaluateur
+agent.eval.score.good = Réponse suffisamment bonne, sortie.
+agent.evolve.generation = Génération
+agent.evolve.maxgen_limit = Nombre maximal de générations atteint, sortie.
+agent.evolve.option.max_generations = Générations max
+agent.evolve.option.num_parents = Nombre de parents
+agent.evolve.running = Agent en cours d'exécution
+agent.evolve.winner = Gagnant : agent
 agent.infinity.confirm.content = AVERTISSEMENT : Vous êtes sur le point de lancer une boucle infinie ! Cela pourrait entraîner une consommation élevée de jetons. Êtes-vous sûr de vouloir continuer ?
+agent.name.supervisor = Superviseur
+agent.name.worker = Travailleur
+agent.option.model = Modèle
+agent.option.name = Nom
+agent.option.prompt = Prompt
+agent.option.prompt.b1.desc = Prompt pour bot 1
+agent.option.prompt.b2.desc = Prompt pour bot 2
+agent.option.prompt.base.desc = Prompt pour agent de base
+agent.option.prompt.chooser.desc = Prompt pour agent choississeur
+agent.option.prompt.feedback.desc = Prompt pour évaluation du feedback
+agent.option.prompt.planner.desc = Prompt pour agent planificateur
+agent.option.prompt.search.desc = Prompt pour agent de recherche
+agent.option.prompt.supervisor.desc = Prompt pour superviseur
+agent.option.prompt.worker.desc = Prompt pour travailleur
+agent.option.section.base = Agent de base
+agent.option.section.chooser = Choisisseur
+agent.option.section.feedback = Feedback
+agent.option.section.planner = Planificateur
+agent.option.section.search = Recherche
+agent.option.section.supervisor = Superviseur
+agent.option.section.worker = Travailleur
+agent.option.section.writer = Écrivain
+agent.option.tools.local = Autoriser les outils locaux
+agent.option.tools.local.desc = Autoriser l'utilisation des outils locaux pour cet agent
+agent.option.tools.remote = Autoriser les outils distants
+agent.option.tools.remote.desc = Autoriser l'utilisation des outils distants pour cet agent
+agent.planner.plan.label = Planificateur (initial)
+agent.planner.plan.prompt.desc = Prompt initial du plan
+agent.planner.refine.label = Planificateur (raffinement)
+agent.planner.refine.prompt.desc = Prompt de raffinement du plan
+agent.planner.step.label = Exécuter le prompt
+agent.planner.step.prompt.desc = Étapes d'exécution du prompt
 alert.preset.empty_id = Le nom est requis.
 alert.preset.no_chat_completion = Au moins une des options suivantes est requise : chat, complétion, img ou vision !
 alert.snap.file_manager = Snap détecté. Veuillez ouvrir le répertoire manuellement dans votre gestionnaire de fichiers :
pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -72,7 +72,53 @@ action.use.read_cmd = Chiedi di leggere questo file...
 action.video.open = Apri video o audio...
 action.video.play = Riproduci video o audio...
 action.video.transcribe = Trascrivi audio...
+agent.coder.additional.label = Prompt aggiuntivo
+agent.coder.additional.prompt.desc = Prompt aggiuntivo per l'agente (verrà aggiunto al prompt di base)
+agent.coder.base.label = Prompt di base
+agent.coder.base.prompt.desc = Esecuzione codice prompt (iniziale)
+agent.eval.feedback = Feedback
+agent.eval.next = Riesecuzione con feedback
+agent.eval.score = Punteggio valutatore
+agent.eval.score.good = Risposta abbastanza buona, uscita.
+agent.evolve.generation = Generazione
+agent.evolve.maxgen_limit = Limite di generazioni massime raggiunto, uscita.
+agent.evolve.option.max_generations = Generazioni massime
+agent.evolve.option.num_parents = Numero di genitori
+agent.evolve.running = Agente in esecuzione
+agent.evolve.winner = Vincitore: agente
 agent.infinity.confirm.content = AVVERTIMENTO: Stai tentando di eseguire un ciclo infinito! Questo potrebbe causare un uso elevato di token. Sei sicuro di voler continuare?
+agent.name.supervisor = Supervisore
+agent.name.worker = Lavoratore
+agent.option.model = Modello
+agent.option.name = Nome
+agent.option.prompt = Prompt
+agent.option.prompt.b1.desc = Prompt per bot 1
+agent.option.prompt.b2.desc = Prompt per bot 2
+agent.option.prompt.base.desc = Prompt per agente base
+agent.option.prompt.chooser.desc = Prompt per agente selezionatore
+agent.option.prompt.feedback.desc = Prompt per valutazione feedback
+agent.option.prompt.planner.desc = Prompt per agente pianificatore
+agent.option.prompt.search.desc = Prompt per agente di ricerca
+agent.option.prompt.supervisor.desc = Prompt per Supervisore
+agent.option.prompt.worker.desc = Prompt per Lavoratore
+agent.option.section.base = Agente Base
+agent.option.section.chooser = Scegliere
+agent.option.section.feedback = Feedback
+agent.option.section.planner = Pianificatore
+agent.option.section.search = Ricerca
+agent.option.section.supervisor = Supervisore
+agent.option.section.worker = Lavoratore
+agent.option.section.writer = Scrittore
+agent.option.tools.local = Consenti strumenti locali
+agent.option.tools.local.desc = Consenti utilizzo strumenti locali per questo agente
+agent.option.tools.remote = Consenti strumenti remoti
+agent.option.tools.remote.desc = Consenti utilizzo strumenti remoti per questo agente
+agent.planner.plan.label = Pianificatore (iniziale)
+agent.planner.plan.prompt.desc = Prompt del piano iniziale
+agent.planner.refine.label = Pianificatore (raffinamento)
+agent.planner.refine.prompt.desc = Prompt di raffinamento del piano
+agent.planner.step.label = Esegui prompt
+agent.planner.step.prompt.desc = Esecuzione dei passi del prompt
 alert.preset.empty_id = Il nome è richiesto.
 alert.preset.no_chat_completion = È richiesta almeno una delle opzioni: chat, completamento, immagine o visione!
 alert.snap.file_manager = Rilevato Snap. Apri la cartella manualmente nel file manager:
pygpt_net/data/locale/locale.pl.ini
CHANGED
@@ -72,7 +72,53 @@ action.use.read_cmd = Poproś o odczytanie tego pliku...
 action.video.open = Otwórz wideo lub audio...
 action.video.play = Odtwórz wideo lub audio...
 action.video.transcribe = Przepisz dźwięk...
+agent.coder.additional.label = Dodatkowy prompt
+agent.coder.additional.prompt.desc = Dodatkowy prompt dla agenta (zostanie dodany do podstawowego promptu)
+agent.coder.base.label = Podstawowy prompt
+agent.coder.base.prompt.desc = Wykonywanie kodu promptu (wstępny)
+agent.eval.feedback = Feedback
+agent.eval.next = Ponowne uruchomienie z feedbackiem
+agent.eval.score = Ocena ewaluatora
+agent.eval.score.good = Odpowiedź wystarczająco dobra, wychodzenie.
+agent.evolve.generation = Generacja
+agent.evolve.maxgen_limit = Osiągnięto maksymalną liczbę generacji, wychodzenie.
+agent.evolve.option.max_generations = Maksymalna liczba generacji
+agent.evolve.option.num_parents = Liczba rodziców
+agent.evolve.running = Działający agent
+agent.evolve.winner = Zwycięzca: agent
 agent.infinity.confirm.content = UWAGA: Próbujesz uruchomić nieskończoną pętlę! Może to spowodować intensywne zużycie tokenów. Czy na pewno chcesz kontynuować?
+agent.name.supervisor = Supervisor
+agent.name.worker = Worker
+agent.option.model = Model
+agent.option.name = Nazwa
+agent.option.prompt = Prompt
+agent.option.prompt.b1.desc = Prompt dla bota 1
+agent.option.prompt.b2.desc = Prompt dla bota 2
+agent.option.prompt.base.desc = Prompt dla agenta bazowego
+agent.option.prompt.chooser.desc = Prompt dla agenta wybieracza
+agent.option.prompt.feedback.desc = Prompt do ewaluacji feedbacku
+agent.option.prompt.planner.desc = Prompt dla agenta planisty
+agent.option.prompt.search.desc = Prompt dla agenta wyszukującego
+agent.option.prompt.supervisor.desc = Prompt dla Supervisora
+agent.option.prompt.worker.desc = Prompt dla Workera
+agent.option.section.base = Agent bazowy
+agent.option.section.chooser = Wybieracz
+agent.option.section.feedback = Feedback
+agent.option.section.planner = Planista
+agent.option.section.search = Wyszukiwanie
+agent.option.section.supervisor = Supervisor
+agent.option.section.worker = Worker
+agent.option.section.writer = Piszący
+agent.option.tools.local = Pozwól na lokalne narzędzia
+agent.option.tools.local.desc = Pozwól na użycie lokalnych narzędzi dla tego agenta
+agent.option.tools.remote = Pozwól na zdalne narzędzia
+agent.option.tools.remote.desc = Pozwól na użycie zdalnych narzędzi dla tego agenta
+agent.planner.plan.label = Planista (wstępny)
+agent.planner.plan.prompt.desc = Wstępny prompt planu
+agent.planner.refine.label = Planista (refine)
+agent.planner.refine.prompt.desc = Refine prompt planu
+agent.planner.step.label = Wykonaj prompt
+agent.planner.step.prompt.desc = Wykonywanie kroków promptu
 alert.preset.empty_id = Podanie nazwy jest wymagane.
 alert.preset.no_chat_completion = Przynajmniej jedna opcja: czat, completion, obraz albo wizja jest wymagana!
 alert.snap.file_manager = Uruchomiono za pomocą Snap-a. Proszę otworzyć katalog manualnie za pomocą przeglądarki plików:
@@ -113,7 +159,7 @@ attachments.auto_index = Auto-indeksacja przy przesyłaniu
 attachments.btn.add = Dodaj plik
 attachments.btn.add_url = Sieć
 attachments.btn.clear = Wyczyść
-attachments.btn.input.add = Dodaj załącznik
+attachments.btn.input.add = Dodaj załącznik
 attachments.capture_clear = Wyczyść przy przechwytywaniu
 attachments.clear.confirm = Wyczyścić listę plików?
 attachments.ctx.indexed = Tak