pygpt-net 2.6.61__py3-none-any.whl → 2.6.63__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +12 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/chat/response.py +8 -2
- pygpt_net/controller/presets/editor.py +65 -1
- pygpt_net/controller/settings/profile.py +16 -4
- pygpt_net/controller/settings/workdir.py +30 -5
- pygpt_net/controller/theme/common.py +4 -2
- pygpt_net/controller/theme/markdown.py +2 -2
- pygpt_net/controller/theme/theme.py +2 -1
- pygpt_net/controller/ui/ui.py +31 -3
- pygpt_net/core/agents/custom/llama_index/runner.py +30 -52
- pygpt_net/core/agents/custom/runner.py +199 -76
- pygpt_net/core/agents/runners/llama_workflow.py +122 -12
- pygpt_net/core/agents/runners/openai_workflow.py +2 -1
- pygpt_net/core/node_editor/types.py +13 -1
- pygpt_net/core/render/web/renderer.py +76 -11
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
- pygpt_net/data/config/presets/agent_openai_coder.json +1 -15
- pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
- pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
- pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
- pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
- pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
- pygpt_net/data/config/presets/agent_supervisor.json +1 -11
- pygpt_net/data/css/style.dark.css +18 -0
- pygpt_net/data/css/style.light.css +20 -1
- pygpt_net/data/js/app/runtime.js +4 -1
- pygpt_net/data/js/app.min.js +3 -2
- pygpt_net/data/locale/locale.de.ini +2 -0
- pygpt_net/data/locale/locale.en.ini +7 -0
- pygpt_net/data/locale/locale.es.ini +2 -0
- pygpt_net/data/locale/locale.fr.ini +2 -0
- pygpt_net/data/locale/locale.it.ini +2 -0
- pygpt_net/data/locale/locale.pl.ini +3 -1
- pygpt_net/data/locale/locale.uk.ini +2 -0
- pygpt_net/data/locale/locale.zh.ini +2 -0
- pygpt_net/item/ctx.py +23 -1
- pygpt_net/js_rc.py +13 -10
- pygpt_net/provider/agents/base.py +0 -0
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/codeact.py +9 -6
- pygpt_net/provider/agents/llama_index/workflow/openai.py +38 -11
- pygpt_net/provider/agents/llama_index/workflow/planner.py +248 -28
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +60 -10
- pygpt_net/provider/agents/openai/agent.py +3 -1
- pygpt_net/provider/agents/openai/agent_b2b.py +17 -13
- pygpt_net/provider/agents/openai/agent_planner.py +617 -258
- pygpt_net/provider/agents/openai/agent_with_experts.py +4 -1
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +8 -6
- pygpt_net/provider/agents/openai/agent_with_feedback.py +8 -6
- pygpt_net/provider/agents/openai/evolve.py +12 -8
- pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/openai/supervisor.py +292 -37
- pygpt_net/provider/api/openai/agents/response.py +1 -0
- pygpt_net/provider/api/x_ai/__init__.py +0 -0
- pygpt_net/provider/core/agent/__init__.py +0 -0
- pygpt_net/provider/core/agent/base.py +0 -0
- pygpt_net/provider/core/agent/json_file.py +0 -0
- pygpt_net/provider/core/config/patch.py +8 -0
- pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
- pygpt_net/provider/llms/base.py +0 -0
- pygpt_net/provider/llms/deepseek_api.py +0 -0
- pygpt_net/provider/llms/google.py +0 -0
- pygpt_net/provider/llms/hugging_face_api.py +0 -0
- pygpt_net/provider/llms/hugging_face_router.py +0 -0
- pygpt_net/provider/llms/mistral.py +0 -0
- pygpt_net/provider/llms/perplexity.py +0 -0
- pygpt_net/provider/llms/x_ai.py +0 -0
- pygpt_net/tools/agent_builder/tool.py +6 -0
- pygpt_net/tools/agent_builder/ui/dialogs.py +0 -41
- pygpt_net/ui/layout/toolbox/presets.py +14 -2
- pygpt_net/ui/main.py +2 -2
- pygpt_net/ui/widget/dialog/confirm.py +55 -5
- pygpt_net/ui/widget/draw/painter.py +90 -1
- pygpt_net/ui/widget/lists/preset.py +289 -25
- pygpt_net/ui/widget/node_editor/editor.py +53 -15
- pygpt_net/ui/widget/node_editor/node.py +82 -104
- pygpt_net/ui/widget/node_editor/view.py +4 -5
- pygpt_net/ui/widget/textarea/input.py +155 -21
- {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/METADATA +22 -8
- {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/RECORD +70 -70
- {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.61.dist-info → pygpt_net-2.6.63.dist-info}/entry_points.txt +0 -0

pygpt_net/core/render/web/renderer.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.17
+# Updated Date: 2025.09.26 17:00:00 #
 # ================================================== #

 import json
@@ -16,15 +16,13 @@ import html as _html
 from dataclasses import dataclass, field

 from datetime import datetime
-from typing import Optional, List, Any,
+from typing import Optional, List, Any, Tuple
 from time import monotonic
 from io import StringIO

-from PySide6.QtCore import QTimer,
-from PySide6.QtWebEngineCore import QWebEnginePage
+from PySide6.QtCore import QTimer, QCoreApplication, QEventLoop, QEvent

 from pygpt_net.core.render.base import BaseRenderer
-from pygpt_net.core.text.utils import has_unclosed_code_tag
 from pygpt_net.item.ctx import CtxItem, CtxMeta
 from pygpt_net.ui.widget.textarea.input import ChatInput
 from pygpt_net.ui.widget.textarea.web import ChatWebOutput
@@ -155,6 +153,7 @@ class Renderer(BaseRenderer):
         app_path = self.window.core.config.get_app_path() if self.window else ""
         self._icon_expand = os.path.join(app_path, "data", "icons", "expand.svg")
         self._icon_sync = os.path.join(app_path, "data", "icons", "sync.svg")
+        self._agent_avatar = os.path.join(app_path, "data", "icons", "robot.svg")
         self._file_prefix = 'file:///' if self.window and self.window.core.platforms.is_windows() else 'file://'

         # Bridge readiness for node append/replace path
@@ -378,6 +377,15 @@ class Renderer(BaseRenderer):
         self.tool_output_end()
         self.prev_chunk_replace = False

+        # Ensure stream header identity is up-to-date (agent/preset override)
+        try:
+            header = self.get_name_header(ctx, stream=True)
+            if pid is not None:
+                self.pids[pid].header = header
+                self._stream_header[pid] = header or ""
+        except Exception:
+            pass
+
     def end(self, meta: CtxMeta, ctx: CtxItem, stream: bool = False):
         """
         Render end
@@ -1163,6 +1171,26 @@
         meta = ctx.meta
         if meta is None:
             return ""
+
+        # Agent-provided display name override:
+        # If ctx.get_agent_name() returns a non-empty name, force "fake personalize":
+        # - use that name
+        # - optionally attach default avatar when enabled via config
+        # - treat as personalized header regardless of preset
+        agent_name = self._get_agent_name(ctx)
+        if agent_name:
+            avatar_html = ""
+            try:
+                use_default = self.window.core.config.get("agent.avatar.default", True)
+                if use_default and os.path.exists(self._agent_avatar):
+                    avatar_html = f"<img src=\"{self._file_prefix}{self._agent_avatar}\" class=\"avatar\"> "
+            except Exception:
+                pass
+            if stream:
+                return f"{avatar_html}{agent_name}"
+            else:
+                return f"<div class=\"name-header name-bot\">{avatar_html}{agent_name}</div>"
+
         preset_id = meta.preset
         if preset_id is None or preset_id == "":
             return ""
@@ -1904,23 +1932,60 @@

     # ------------------------- Helpers: build JSON blocks -------------------------

+    def _get_agent_name(self, ctx: CtxItem) -> Optional[str]:
+        """
+        Resolve agent-provided name from ctx if available.
+
+        This is used to force "fake personalize" on the UI:
+        - when present and non-empty, we use this name,
+        - optionally attach default avatar when enabled via config,
+        - we set personalize flag to True in node payloads.
+        """
+        try:
+            if hasattr(ctx, "get_agent_name"):
+                name = ctx.get_agent_name()
+                if isinstance(name, str):
+                    name = name.strip()
+                    return name or None
+        except Exception:
+            pass
+        return None
+
     def _output_identity(self, ctx: CtxItem) -> Tuple[str, Optional[str], bool]:
         """
-        Resolve output identity (name, avatar file:// path) based on preset.
+        Resolve output identity (name, avatar file:// path) based on preset or ctx-provided agent name.

         :param ctx: context item
         :return: (name, avatar, personalize)
         """
+        # 1) Agent-provided name override -> force personalize, optionally default avatar
+        agent_name = self._get_agent_name(ctx)
+        if agent_name:
+            avatar = None
+            try:
+                if self.window.core.config.get("agent.avatar.default", True) and os.path.exists(self._agent_avatar):
+                    avatar = f"{self._file_prefix}{self._agent_avatar}"
+            except Exception:
+                pass
+            return agent_name, avatar, True
+
+        # 2) Fallback to preset-based personalize
         meta = ctx.meta
         if meta is None:
-            return
+            return "", None, False
+
+        pid = self.get_or_create_pid(meta)
+        default_name = self.pids[pid].name_bot if pid in self.pids else ""
+
         preset_id = meta.preset
         if not preset_id:
-            return
+            return default_name, None, False
+
         preset = self.window.core.presets.get(preset_id)
         if preset is None or not preset.ai_personalize:
-            return
-
+            return default_name, None, False
+
+        name = preset.ai_name or default_name
         avatar = None
         if preset.ai_avatar:
             presets_dir = self.window.core.config.get_user_dir("presets")
@@ -1928,7 +1993,7 @@
             avatar_path = os.path.join(avatars_dir, preset.ai_avatar)
             if os.path.exists(avatar_path):
                 avatar = f"{self._file_prefix}{avatar_path}"
-        return name, avatar,
+        return name, avatar, True

     def _build_render_block(
         self,
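
The net effect of the renderer hunks above is a new precedence for the bot header identity: a name supplied via ctx.get_agent_name() wins and forces personalization (optionally with the default robot.svg avatar when agent.avatar.default is enabled), otherwise the preset's ai_personalize settings apply, otherwise the plain default bot name is used. A condensed, hypothetical sketch of that precedence as a standalone function (simplified names, not the project's API):

    from typing import Optional, Tuple

    def resolve_identity(agent_name: Optional[str],
                         preset_personalize: bool,
                         preset_name: str,
                         default_name: str) -> Tuple[str, bool]:
        """Return (display_name, personalized) following the order used by _output_identity."""
        # 1) An agent-provided name always wins and forces "fake personalize".
        if agent_name and agent_name.strip():
            return agent_name.strip(), True
        # 2) Otherwise fall back to the preset, but only if ai_personalize is enabled.
        if preset_personalize:
            return preset_name or default_name, True
        # 3) Default bot name, not personalized.
        return default_name, False

    print(resolve_identity("Planner", False, "", "Assistant"))  # ('Planner', True)
    print(resolve_identity(None, True, "Coder", "Assistant"))   # ('Coder', True)
    print(resolve_identity(None, False, "", "Assistant"))       # ('Assistant', False)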

pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.
-        "app.version": "2.6.
-        "updated_at": "2025-09-
+        "version": "2.6.63",
+        "app.version": "2.6.63",
+        "updated_at": "2025-09-27T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],

pygpt_net/data/config/models.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.
-        "app.version": "2.6.
-        "updated_at": "2025-09-
+        "version": "2.6.63",
+        "app.version": "2.6.63",
+        "updated_at": "2025-09-27T00:00:00"
     },
     "items": {
         "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {

pygpt_net/data/config/presets/agent_openai_b2b.json
CHANGED
@@ -31,21 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_b2b": {
-            "bot_1": {
-                "prompt": "You're an advanced AI assistant and an expert in every field. Imagine that I am also such an AI assistant and converse with me in an expert manner. As two assistants, let's brainstorm and arrive at some advanced solutions.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "bot_2": {
-                "model": "gpt-4o",
-                "prompt": "You're an advanced AI assistant and an expert in every field. Imagine that I am also such an AI assistant and converse with me in an expert manner. As two assistants, let's brainstorm and arrive at some advanced solutions.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.94",
         "app.version": "2.5.94",

pygpt_net/data/config/presets/agent_openai_coder.json
CHANGED
@@ -31,21 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_feedback": {
-            "base": {
-                "prompt": "You are senior programmer and expert in coding. Use markdown for code blocks. If there is any feedback provided, use it to improve the code.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "o3-mini-low",
-                "prompt": "You evaluate a code and decide if it's correct. If it's not correct, you provide feedback on what needs to be fixed and improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the code is good enough. You can use tools for checking the code, running tests, etc.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_openai_evolve.json
CHANGED
@@ -31,29 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_evolve": {
-            "base": {
-                "num_parents": 2,
-                "max_generations": 10,
-                "prompt": "You generate a response based on the user's input. If there is any feedback provided, use it to improve the response.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "chooser": {
-                "model": "gpt-4o",
-                "prompt": "I will give you a list of different answers to the given question. From the provided list, choose the best and most accurate answer and return the number of that answer to me, without any explanation, just the number of the answer.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a result and decide if it's good enough. If it's not good enough, you provide feedback on what needs to be improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the result is good enough - do not go for perfection.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.85",
         "app.version": "2.5.85",

pygpt_net/data/config/presets/agent_openai_planner.json
CHANGED
@@ -31,27 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_planner": {
-            "base": {
-                "prompt": "Prepare a comprehensive and detailed response to the question based on the action plan. Follow each step outlined in the plan. If any feedback is provided, use it to improve the response.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "planner": {
-                "model": "o3-mini-high",
-                "prompt": "Make a plan of task execution for the query by dividing a task into smaller steps. Do not provide any solutions here. The plan should only contain a list of steps as instructions for someone else to follow. Prepare a plan in the language in which the query was made. Format the plan using markdown.\n\nExample:\n\n----------------\n\n**Sub-task 1: <name>**\n\n- Description: <subtask description>\n- Expected output: <expected output>\n- Dependencies: []\n- Required Tools: []\n\n**Sub-task 2: <name>**\n\n- Description: <subtask description>\n- Expected output: <expected output>\n- Dependencies: [<subtask's 1 name>]\n- Required Tools: [WebSearch]\n\n[...]",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a result and decide if it's good enough. If it's not good enough, you provide feedback on what needs to be improved. Never give it a pass on the first try. After 5 attempts, you can give it a pass if the result is good enough - do not go for perfection, but ensure all tasks are completed.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_openai_researcher.json
CHANGED
@@ -31,27 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_bot_researcher": {
-            "writer": {
-                "prompt": "You are a senior researcher tasked with writing a cohesive report for a research query. You will be provided with the original query, and some initial research done by a research assistant.\nYou should first come up with an outline for the report that describes the structure and flow of the report. Then, generate the report and return that as your final output.\nThe final output should be in markdown format, and it should be lengthy and detailed. Aim for 5-10 pages of content, at least 1000 words.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "planner": {
-                "model": "gpt-4o",
-                "prompt": "You are a helpful research assistant. Given a query, come up with a set of web searches to perform to best answer the query. Output between 5 and 20 terms to query for.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "search": {
-                "model": "gpt-4o",
-                "prompt": "You are a research assistant. Given a search term, you search the web for that term and produce a concise summary of the results. The summary must be 2-3 paragraphs and less than 300 words. Capture the main points. Write succinctly, no need to have complete sentences or good grammar. This will be consumed by someone synthesizing a report, so its vital you capture the essence and ignore any fluff. Do not include any additional commentary other than the summary itself.",
-                "allow_local_tools": false,
-                "allow_remote_tools": true
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_openai_supervisor.json
CHANGED
@@ -33,19 +33,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_supervisor": {
-            "supervisor": {
-                "prompt": "\n You are the \u201cSupervisor\u201d (orchestrator). You never use tools directly except the tool that runs the Worker.\n Process:\n - Decompose the user's task into actionable instructions for the Worker.\n - Do NOT pass your conversation history to the Worker. Pass ONLY a concise, self-contained instruction.\n - After each Worker result, evaluate against a clear Definition of Done (DoD). If not met, call the Worker again with a refined instruction.\n - Ask the user only if absolutely necessary. If you must, STOP and output a single JSON with:\n {\"action\":\"ask_user\",\"question\":\"...\",\"reasoning\":\"...\"}\n - When done, output a single JSON:\n {\"action\":\"final\",\"final_answer\":\"...\",\"reasoning\":\"...\"}\n - Otherwise, to run the Worker, call the run_worker tool with a short instruction.\n Respond in the user's language. Keep outputs short and precise.\n "
-            },
-            "worker": {
-                "model": "gpt-4o",
-                "prompt": "\n You are the \u201cWorker\u201d. You execute Supervisor instructions strictly, using your tools.\n - Keep your own memory across calls (Worker session).\n - Return a concise result with key evidence/extracts from tools when applicable.\n - Do not ask the user questions directly; if instruction is underspecified, clearly state what is missing.\n Respond in the user's language.\n ",
-                "allow_local_tools": true,
-                "allow_remote_tools": true
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.6.8",
         "app.version": "2.6.8",

pygpt_net/data/config/presets/agent_openai_writer.json
CHANGED
@@ -31,21 +31,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "openai_agent_feedback": {
-            "base": {
-                "prompt": "You generate a very short story outline based on the user's input. If there is any feedback provided, use it to improve the outline.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            },
-            "feedback": {
-                "model": "gpt-4o",
-                "prompt": "You evaluate a story outline and decide if it's good enough. \nIf it's not good enough, you provide feedback on what needs to be improved. \nNever give it a pass on the first try. After 5 attempts, you can give it a pass if the story outline is good enough - do not go for perfection.",
-                "allow_local_tools": false,
-                "allow_remote_tools": false
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.5.81",
         "app.version": "2.5.81",

pygpt_net/data/config/presets/agent_supervisor.json
CHANGED
@@ -33,17 +33,7 @@
     "enabled": true,
     "description": "",
     "remote_tools": "",
-    "extra": {
-        "supervisor": {
-            "supervisor": {
-                "prompt": "\nYou are the \u201cSupervisor\u201d \u2013 the main orchestrator. Do not use tools directly.\nYour tasks:\n- Break down the user's task into steps and create precise instructions for the \u201cWorker\u201d agent.\n- Do not pass your history/memory to the Worker. Only pass minimal, self-sufficient instructions.\n- After each Worker response, assess progress towards the Definition of Done (DoD). If not met \u2013 generate a better instruction.\n- Ask the user only when absolutely necessary. Then stop and return the question.\n- When the task is complete \u2013 return the final answer to the user.\nAlways return only ONE JSON object:\n{\n \"action\": \"task\" | \"final\" | \"ask_user\",\n \"instruction\": \"<Worker's instruction or ''>\",\n \"final_answer\": \"<final answer or ''>\",\n \"question\": \"<user question or ''>\",\n \"reasoning\": \"<brief reasoning and quality control>\",\n \"done_criteria\": \"<list/text of DoD criteria>\"\n}\nEnsure proper JSON (no comments, no trailing commas). Respond in the user's language.\n"
-            },
-            "worker": {
-                "model": "gpt-4o",
-                "prompt": "\nYou are the \u201cWorker\u201d \u2013 executor of the Supervisor's instructions. You have your own memory and tools.\n- Execute the Supervisor's instructions precisely and concisely.\n- Use the available tools and return a brief result + relevant data/reasoning.\n- Maintain the working context in your memory (only Worker).\n- Return plain text (not JSON) unless instructed otherwise by the Supervisor.\n- Respond in the user's language.\n"
-            }
-        }
-    },
+    "extra": {},
     "__meta__": {
         "version": "2.6.8",
         "app.version": "2.6.8",

pygpt_net/data/css/style.dark.css
CHANGED
@@ -144,4 +144,22 @@ QCalendarWidget QAbstractItemView::item:hover {{
 QCalendarWidget QMenu::item:selected:focus,
 QCalendarWidget QMenu::item:selected {{
     background: #3a4045;
+}}
+
+/* Node editor */
+NodeEditor {{
+    qproperty-gridBackColor: #242629;
+    qproperty-gridPenColor: #3b3f46;
+
+    qproperty-nodeBackgroundColor: #2d2f34;
+    qproperty-nodeBorderColor: #4b4f57;
+    qproperty-nodeSelectionColor: #ff9900;
+    qproperty-nodeTitleColor: #3a3d44;
+
+    qproperty-portInputColor: #66b2ff;
+    qproperty-portOutputColor: #70e070;
+    qproperty-portConnectedColor: #ffd166;
+
+    qproperty-edgeColor: #c0c0c0;
+    qproperty-edgeSelectedColor: #ff8a5c;
 }}

pygpt_net/data/css/style.light.css
CHANGED
@@ -6,6 +6,7 @@
 QWidget {{
     color: #000;
     margin: 1px;
+    background-color: #eeeeee;
 }}

 QListView,
@@ -248,7 +249,7 @@ QCalendarWidget QToolButton:hover {{
     height: 20px;
 }}
 QMenu::indicator {{
-    border: 1px solid gray;
+    border: 1px solid gray;
 }}
 QCalendarWidget QAbstractItemView::item:selected:focus,
 QCalendarWidget QAbstractItemView::item:selected {{
@@ -259,4 +260,22 @@ QCalendarWidget QAbstractItemView::item:hover {{
 }}
 .file-explorer QTreeView::branch {{
     background: #fff;
+}}
+
+/* Node editor */
+NodeEditor {{
+    qproperty-gridBackColor: #ffffff;
+    qproperty-gridPenColor: #eaeaea;
+
+    qproperty-nodeBackgroundColor: #2d2f34;
+    qproperty-nodeBorderColor: #4b4f57;
+    qproperty-nodeSelectionColor: #ff9900;
+    qproperty-nodeTitleColor: #3a3d44;
+
+    qproperty-portInputColor: #66b2ff;
+    qproperty-portOutputColor: #70e070;
+    qproperty-portConnectedColor: #ffd166;
+
+    qproperty-edgeColor: #c0c0c0;
+    qproperty-edgeSelectedColor: #ff8a5c;
 }}

pygpt_net/data/js/app/runtime.js
CHANGED
@@ -267,7 +267,10 @@ class Runtime {
     api_updateToolOutput = (c) => this.toolOutput.update(c);
     api_clearToolOutput = () => this.toolOutput.clear();
     api_beginToolOutput = () => this.toolOutput.begin();
-    api_endToolOutput = () =>
+    api_endToolOutput = () => {
+        this.toolOutput.end();
+        this.scrollMgr.scheduleScroll();
+    }
     api_enableToolOutput = () => this.toolOutput.enable();
     api_disableToolOutput = () => this.toolOutput.disable();
     api_toggleToolOutput = (id) => this.toolOutput.toggle(id);

pygpt_net/data/js/app.min.js
CHANGED
@@ -1,4 +1,4 @@
-/* app.min.js — generated on 2025-09-
+/* app.min.js — generated on 2025-09-27 09:10:08 by bin/minify_js.py using rjsmin */

 /* data/js/app/async.js */
 class AsyncRunner{constructor(cfg,raf){this.cfg=cfg||{};this.raf=raf||null;const A=this.cfg.ASYNC||{};this.SLICE_MS=Utils.g('ASYNC_SLICE_MS',A.SLICE_MS??12);this.SLICE_HIDDEN_MS=Utils.g('ASYNC_SLICE_HIDDEN_MS',A.SLICE_HIDDEN_MS??Math.min(this.SLICE_MS,6));this.MIN_YIELD_MS=Utils.g('ASYNC_MIN_YIELD_MS',A.MIN_YIELD_MS??0);this._opGen=new Map();}
@@ -867,7 +867,8 @@ try{this.tips&&this.tips.hide();}catch(_){}}
 api_onChunk=(name,chunk,type)=>{const t=String(type||'text_delta');if(t==='text_delta'){this.api_appendStream(name,chunk);return;}
 this.logger.debug('STREAM','IGNORED_NON_TEXT_CHUNK',{type:t,len:(chunk?String(chunk).length:0)});};api_beginStream=(chunk=false)=>{this.tips&&this.tips.hide();this.resetStreamState('beginStream',{clearMsg:true,finalizeActive:false,forceHeavy:true});this.stream.beginStream(chunk);};api_endStream=()=>{this.stream.endStream();};api_applyStream=(name,chunk)=>{this.stream.applyStream(name,chunk);};api_appendStream=(name,chunk)=>{this.streamQ.enqueue(name,chunk);};api_nextStream=()=>{this.tips&&this.tips.hide();const element=this.dom.get('_append_output_');const before=this.dom.get('_append_output_before_');if(element&&before){const frag=document.createDocumentFragment();while(element.firstChild)frag.appendChild(element.firstChild);before.appendChild(frag);}
 this.resetStreamState('nextStream',{clearMsg:true,finalizeActive:false,forceHeavy:true});this.scrollMgr.scheduleScroll();};api_clearStream=()=>{this.tips&&this.tips.hide();this.resetStreamState('clearStream',{clearMsg:true,forceHeavy:true});const el=this.dom.getStreamContainer();if(!el)return;el.replaceChildren();};api_appendNode=(payload)=>{this.resetStreamState('appendNode');this.data.append(payload);};api_replaceNodes=(payload)=>{this.resetStreamState('replaceNodes',{clearMsg:true,forceHeavy:true});this.dom.clearNodes();this.data.replace(payload);};api_appendToInput=(payload)=>{this.nodes.appendToInput(payload);this.scrollMgr.autoFollow=true;this.scrollMgr.userInteracted=false;try{this.scrollMgr.lastScrollTop=Utils.SE.scrollTop|0;}catch(_){}
-this.scrollMgr.scheduleScroll();};api_clearNodes=()=>{this.dom.clearNodes();this.resetStreamState('clearNodes',{clearMsg:true,forceHeavy:true});};api_clearInput=()=>{this.resetStreamState('clearInput',{forceHeavy:true});this.dom.clearInput();};api_clearOutput=()=>{this.dom.clearOutput();this.resetStreamState('clearOutput',{clearMsg:true,forceHeavy:true});};api_clearLive=()=>{this.dom.clearLive();this.resetStreamState('clearLive',{forceHeavy:true});};api_appendToolOutput=(c)=>this.toolOutput.append(c);api_updateToolOutput=(c)=>this.toolOutput.update(c);api_clearToolOutput=()=>this.toolOutput.clear();api_beginToolOutput=()=>this.toolOutput.begin();api_endToolOutput=()=>this.toolOutput.end();
+this.scrollMgr.scheduleScroll();};api_clearNodes=()=>{this.dom.clearNodes();this.resetStreamState('clearNodes',{clearMsg:true,forceHeavy:true});};api_clearInput=()=>{this.resetStreamState('clearInput',{forceHeavy:true});this.dom.clearInput();};api_clearOutput=()=>{this.dom.clearOutput();this.resetStreamState('clearOutput',{clearMsg:true,forceHeavy:true});};api_clearLive=()=>{this.dom.clearLive();this.resetStreamState('clearLive',{forceHeavy:true});};api_appendToolOutput=(c)=>this.toolOutput.append(c);api_updateToolOutput=(c)=>this.toolOutput.update(c);api_clearToolOutput=()=>this.toolOutput.clear();api_beginToolOutput=()=>this.toolOutput.begin();api_endToolOutput=()=>{this.toolOutput.end();this.scrollMgr.scheduleScroll();}
+api_enableToolOutput=()=>this.toolOutput.enable();api_disableToolOutput=()=>this.toolOutput.disable();api_toggleToolOutput=(id)=>this.toolOutput.toggle(id);api_appendExtra=(id,c)=>this.nodes.appendExtra(id,c,this.scrollMgr);api_removeNode=(id)=>this.nodes.removeNode(id,this.scrollMgr);api_removeNodesFromId=(id)=>this.nodes.removeNodesFromId(id,this.scrollMgr);api_replaceLive=(content)=>{const el=this.dom.get('_append_live_');if(!el)return;if(el.classList.contains('hidden')){el.classList.remove('hidden');el.classList.add('visible');}
 el.innerHTML=content;try{const maybePromise=this.renderer.renderPendingMarkdown(el);const post=()=>{try{this.highlighter.observeNewCode(el,{deferLastIfStreaming:true,minLinesForLast:this.cfg.PROFILE_CODE.minLinesForHL,minCharsForLast:this.cfg.PROFILE_CODE.minCharsForHL},this.stream.activeCode);this.highlighter.observeMsgBoxes(el,(box)=>{this.highlighter.observeNewCode(box,{deferLastIfStreaming:true,minLinesForLast:this.cfg.PROFILE_CODE.minLinesForHL,minCharsForLast:this.cfg.PROFILE_CODE.minCharsForHL},this.stream.activeCode);this.codeScroll.initScrollableBlocks(box);});}catch(_){}
 try{const mm=getMathMode();if(mm==='finalize-only')this.math.schedule(el,0,true);else this.math.schedule(el);}catch(_){}
 this.scrollMgr.scheduleScroll();};if(maybePromise&&typeof maybePromise.then==='function'){maybePromise.then(post);}else{post();}}catch(_){this.scrollMgr.scheduleScroll();}};api_updateFooter=(html)=>{const el=this.dom.get('_footer_');if(el)el.innerHTML=html;};api_enableEditIcons=()=>this.ui.enableEditIcons();api_disableEditIcons=()=>this.ui.disableEditIcons();api_enableTimestamp=()=>this.ui.enableTimestamp();api_disableTimestamp=()=>this.ui.disableTimestamp();api_enableBlocks=()=>this.ui.enableBlocks();api_disableBlocks=()=>this.ui.disableBlocks();api_updateCSS=(styles)=>this.ui.updateCSS(styles);api_getScrollPosition=()=>{this.bridge.updateScrollPosition(window.scrollY);};api_setScrollPosition=(pos)=>{try{window.scrollTo(0,pos);this.scrollMgr.prevScroll=parseInt(pos);}catch(_){}};api_showLoading=()=>this.loading.show();api_hideLoading=()=>this.loading.hide();api_restoreCollapsedCode=(root)=>this.renderer.restoreCollapsedCode(root);api_scrollToTopUser=()=>this.scrollMgr.scrollToTopUser();api_scrollToBottomUser=()=>this.scrollMgr.scrollToBottomUser();api_showTips=()=>this.tips.show();api_hideTips=()=>this.tips.hide();api_getCustomMarkupRules=()=>this.customMarkup.getRules();api_setCustomMarkupRules=(rules)=>{this.customMarkup.setRules(rules);try{this.stream.setCustomFenceSpecs(this.customMarkup.getSourceFenceSpecs());}catch(_){}};init(){this.highlighter.initHLJS();this.dom.init();this.ui.ensureStickyHeaderStyle();this.tips=new TipsManager(this.dom);this.events.install();this.bridge.initQWebChannel(this.cfg.PID,(bridge)=>{const onChunk=(name,chunk,type)=>this.api_onChunk(name,chunk,type);const onNode=(payload)=>this.api_appendNode(payload);const onNodeReplace=(payload)=>this.api_replaceNodes(payload);const onNodeInput=(html)=>this.api_appendToInput(html);this.bridge.connect(onChunk,onNode,onNodeReplace,onNodeInput);try{this.logger.bindBridge(this.bridge.bridge||this.bridge);}catch(_){}});this.renderer.init();try{this.renderer.renderPendingMarkdown(document);}catch(_){}

pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -658,6 +658,7 @@ event.control.voice_cmd.toggle = Sprachsteuerung: Umschalten
 event.control.voice_msg.start = Spracheingabe: Start
 event.control.voice_msg.stop = Spracheingabe: Stopp
 event.control.voice_msg.toggle = Spracheingabe: Umschalten
+exit.msg = Gefällt dir PyGPT? Unterstütze die Entwicklung des Projekts:
 expert.wait.failed: Aufruf des Experten fehlgeschlagen
 expert.wait.status: Warten auf Experten...
 files.delete.confirm = Datei/Verzeichnis löschen?
@@ -734,6 +735,7 @@ input.search.placeholder = Suchen...
 input.send_clear = Nach dem Senden löschen
 input.stream = Streamen
 input.tab = Eingabe
+input.tab.tooltip = {chars} Zeichen (~{tokens} Token)
 interpreter.all = Verlauf ausführen (alle)
 interpreter.auto_clear = Bei Senden löschen
 interpreter.btn.clear = Ausgabe löschen

pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -98,6 +98,7 @@ agent.name.worker = Worker
 agent.option.model = Model
 agent.option.name = Name
 agent.option.prompt = Prompt
+agent.option.prompt.refine.desc = Prompt for plan refining
 agent.option.prompt.b1.desc = Prompt for bot 1
 agent.option.prompt.b2.desc = Prompt for bot 2
 agent.option.prompt.base.desc = Prompt for Base Agent
@@ -107,11 +108,14 @@ agent.option.prompt.planner.desc = Prompt for Planner agent
 agent.option.prompt.search.desc = Prompt for search agent
 agent.option.prompt.supervisor.desc = Prompt for Supervisor
 agent.option.prompt.worker.desc = Prompt for Worker
+agent.option.refine.after_each = After each step
+agent.option.refine.after_each.desc = Refine plan after each step
 agent.option.role = Short description of the agent's operation for instructing the model (optional)
 agent.option.section.base = Base agent
 agent.option.section.chooser = Chooser
 agent.option.section.feedback = Feedback
 agent.option.section.planner = Planner
+agent.option.section.refine = Refine plan
 agent.option.section.search = Search
 agent.option.section.supervisor = Supervisor
 agent.option.section.worker = Worker
@@ -663,6 +667,7 @@ event.control.voice_cmd.toggle = Voice control: Toggle
 event.control.voice_msg.start = Voice input: Start
 event.control.voice_msg.stop = Voice input: Stop
 event.control.voice_msg.toggle = Voice input: Toggle
+exit.msg = Do you like PyGPT? Support the development of the project:
 expert.wait.failed: Failed calling expert
 expert.wait.status: Waiting for expert...
 files.delete.confirm = Delete file/directory?
@@ -739,6 +744,7 @@ input.search.placeholder = Search...
 input.send_clear = Clear on send
 input.stream = Stream
 input.tab = Input
+input.tab.tooltip = {chars} chars (~{tokens} tokens)
 interpreter.all = Execute history (all)
 interpreter.auto_clear = Clear output on send
 interpreter.btn.clear = Clear output
@@ -1723,3 +1729,4 @@ vision.capture.manual.captured.success = Image captured from the camera:
 vision.capture.name.prefix = Camera capture:
 vision.capture.options.title = Video capture
 vision.checkbox.tooltip = If checked, the vision model is active. It will be automatically activated upon image upload. You can deactivate it in real-time.
+agent.option.prompt.desc = Prompt for agent

pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -659,6 +659,7 @@ event.control.voice_cmd.toggle = Control de voz: Conmutar
 event.control.voice_msg.start = Entrada de voz: Iniciar
 event.control.voice_msg.stop = Entrada de voz: Detener
 event.control.voice_msg.toggle = Entrada de voz: Conmutar
+exit.msg = ¿Te gusta PyGPT? Apoya el desarrollo del proyecto:
 expert.wait.failed: Error al llamar al experto
 expert.wait.status: Esperando al experto...
 files.delete.confirm = ¿Eliminar archivo/directorio?
@@ -735,6 +736,7 @@ input.search.placeholder = Buscar...
 input.send_clear = Limpiar al enviar
 input.stream = Transmisión
 input.tab = Entrada
+input.tab.tooltip = {chars} caracteres (~{tokens} fichas)
 interpreter.all = Ejecutar historial (todo)
 interpreter.auto_clear = Limpiar al enviar
 interpreter.btn.clear = Limpiar salida

pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -658,6 +658,7 @@ event.control.voice_cmd.toggle = Commande vocale : Basculer
 event.control.voice_msg.start = Entrée vocale : Commencer
 event.control.voice_msg.stop = Entrée vocale : Arrêter
 event.control.voice_msg.toggle = Entrée vocale : Basculer
+exit.msg = PyGPT vous plaît-il ? Soutenez le développement du projet :
 expert.wait.failed: Échec de l'appel à l'expert
 expert.wait.status: En attente de l'expert...
 files.delete.confirm = Supprimer le fichier/répertoire ?
@@ -734,6 +735,7 @@ input.search.placeholder = Rechercher...
 input.send_clear = Effacer après envoi
 input.stream = Flux
 input.tab = Entrée
+input.tab.tooltip = {chars} caractères (~{tokens} jetons)
 interpreter.all = Exécuter l'historique (tous)
 interpreter.auto_clear = Effacer à l'envoi
 interpreter.btn.clear = Effacer la sortie

pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -658,6 +658,7 @@ event.control.voice_cmd.toggle = Comando vocale: Commuta
 event.control.voice_msg.start = Input vocale: Avvia
 event.control.voice_msg.stop = Input vocale: Arresta
 event.control.voice_msg.toggle = Input vocale: Commuta
+exit.msg = Ti piace PyGPT? Sostieni lo sviluppo del progetto:
 expert.wait.failed: Chiamata all'esperto non riuscita
 expert.wait.status: In attesa dell'esperto...
 files.delete.confirm = Eliminare il file/la cartella?
@@ -734,6 +735,7 @@ input.search.placeholder = Cerca...
 input.send_clear = Pulisci dopo l'invio
 input.stream = Flusso
 input.tab = Input
+input.tab.tooltip = {chars} caratteri (~{tokens} token)
 interpreter.all = Esegui cronologia (tutto)
 interpreter.auto_clear = Cancella all'invio
 interpreter.btn.clear = Cancella output

pygpt_net/data/locale/locale.pl.ini
CHANGED
@@ -244,7 +244,7 @@ clipboard.copied_to = Skopiowano do schowka:
 cmd.enabled = + Narzędzia
 cmd.tip = Wskazówka: Aby umożliwić wykonanie narzędzi z wtyczek, musisz włączyć opcję "+ Narzędzia".
 coming_soon = Dostępne wkrótce...
-common.down = Przesuń w dół
+common.down = Przesuń w dół
 common.up = Przesuń w górę
 confirm.assistant.delete = Na pewno usunąć asystenta?
 confirm.assistant.files.clear = Wyczyścić pliki (tylko lokalnie)?
@@ -659,6 +659,7 @@ event.control.voice_cmd.toggle = Kontrola głosowa: Przełącz
 event.control.voice_msg.start = Wejście głosowe: Rozpocznij
 event.control.voice_msg.stop = Wejście głosowe: Zatrzymaj
 event.control.voice_msg.toggle = Wejście głosowe: Przełącz
+exit.msg = Podoba Ci się PyGPT? Wesprzyj rozwój projektu:
 expert.wait.failed: Nie udało się wywołać eksperta
 expert.wait.status: Oczekiwanie na eksperta...
 files.delete.confirm = Usunąć plik/katalog?
@@ -735,6 +736,7 @@ input.search.placeholder = Szukaj...
 input.send_clear = Wyczyść po wysłaniu
 input.stream = Stream
 input.tab = Input
+input.tab.tooltip = {chars} znaków (~{tokens} tokenów)
 interpreter.all = Wykonaj historię (wszystko)
 interpreter.auto_clear = Wyczyść wyjście przy wysyłaniu
 interpreter.btn.clear = Wyczyść wyjście

pygpt_net/data/locale/locale.uk.ini
CHANGED
@@ -658,6 +658,7 @@ event.control.voice_cmd.toggle = Керування голосом: Перемк
 event.control.voice_msg.start = Голосовий вхід: Розпочати
 event.control.voice_msg.stop = Голосовий вхід: Зупинити
 event.control.voice_msg.toggle = Голосовий вхід: Перемкнути
+exit.msg = Вам подобається PyGPT? Підтримайте розвиток проєкту:
 expert.wait.failed: Виклик експерта не вдався
 expert.wait.status: Очікування експерта...
 files.delete.confirm = Видалити файл/директорію?
@@ -734,6 +735,7 @@ input.search.placeholder = Пошук...
 input.send_clear = Очистити після відправлення
 input.stream = Потік
 input.tab = Введення
+input.tab.tooltip = {chars} символів (~{tokens} токенів)
 interpreter.all = Виконати історію (все)
 interpreter.auto_clear = Очистити при відправці
 interpreter.btn.clear = Очистити вивід