pygpt-net 2.6.9__py3-none-any.whl → 2.6.10__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in the public registry.
- pygpt_net/CHANGELOG.txt +8 -0
- pygpt_net/__init__.py +1 -1
- pygpt_net/controller/ctx/common.py +9 -3
- pygpt_net/controller/ctx/ctx.py +19 -17
- pygpt_net/core/agents/runner.py +19 -0
- pygpt_net/core/agents/tools.py +93 -52
- pygpt_net/data/config/config.json +3 -2
- pygpt_net/data/config/models.json +2 -2
- pygpt_net/data/config/settings.json +14 -0
- pygpt_net/data/locale/locale.de.ini +2 -0
- pygpt_net/data/locale/locale.en.ini +2 -0
- pygpt_net/data/locale/locale.es.ini +2 -0
- pygpt_net/data/locale/locale.fr.ini +2 -0
- pygpt_net/data/locale/locale.it.ini +2 -0
- pygpt_net/data/locale/locale.pl.ini +3 -1
- pygpt_net/data/locale/locale.uk.ini +2 -0
- pygpt_net/data/locale/locale.zh.ini +2 -0
- pygpt_net/plugin/google/config.py +306 -1
- pygpt_net/plugin/google/plugin.py +22 -0
- pygpt_net/plugin/google/worker.py +579 -3
- pygpt_net/provider/core/config/patch.py +11 -0
- pygpt_net/ui/main.py +1 -1
- pygpt_net/ui/widget/lists/context.py +10 -1
- {pygpt_net-2.6.9.dist-info → pygpt_net-2.6.10.dist-info}/METADATA +78 -29
- {pygpt_net-2.6.9.dist-info → pygpt_net-2.6.10.dist-info}/RECORD +28 -28
- {pygpt_net-2.6.9.dist-info → pygpt_net-2.6.10.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.9.dist-info → pygpt_net-2.6.10.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.9.dist-info → pygpt_net-2.6.10.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,11 @@
+2.6.10 (2025-08-17)
+
+- Enhanced the handling of the context list.
+- Integrated RAG into OpenAI Agents.
+- Enhanced RAG management in Agents.
+- Added an option: Config -> Agents -> General -> Auto-retrieve additional context from RAG.
+- Included Google Docs, Maps, and Colab in the Google plugin.
+
 2.6.9 (2025-08-17)

 - Added two new agents for LlamaIndex and OpenAI: Supervisor and Worker (beta).
pygpt_net/__init__.py
CHANGED
@@ -13,7 +13,7 @@ __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.6.9"
+__version__ = "2.6.10"
 __build__ = "2025-08-17"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
pygpt_net/controller/ctx/common.py
CHANGED
@@ -31,7 +31,8 @@ class Common:
         self.summarizer = Summarizer(window)

     def _update_ctx_no_scroll(self):
-
+        """Update ctx list without scroll"""
+        self.window.controller.ctx.update_and_restore()

     def update_label_by_current(self):
         """Update ctx label from current ctx"""
@@ -83,7 +84,7 @@
         if new_id is not None:
             self.window.core.attachments.context.duplicate(meta_id, new_id)
             self.window.update_status(f"Context duplicated, new ctx id: {new_id}")
-            QTimer.singleShot(
+            QTimer.singleShot(10, self._update_ctx_no_scroll)

     def dismiss_rename(self):
         """Dismiss rename dialog"""
@@ -97,7 +98,12 @@
         """
         data_id = meta.id if meta else None
         title = meta.name if meta else None
-        self.window.controller.ui.tabs.focus_by_type(
+        self.window.controller.ui.tabs.focus_by_type(
+            Tab.TAB_CHAT,
+            data_id=data_id,
+            title=title,
+            meta=meta
+        )

     def restore_display_filter(self):
         """Restore display filter"""
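Note: the duplicate handler above defers the list refresh with QTimer.singleShot(10, ...) so the update runs on the event loop after the current handler finishes. A minimal, self-contained illustration of that pattern (not pygpt-net's actual widget code):

from PySide6.QtCore import QCoreApplication, QTimer

app = QCoreApplication([])

def update_ctx_no_scroll():
    # stand-in for Common._update_ctx_no_scroll(); the real handler refreshes the ctx list
    print("ctx list refreshed without scrolling")
    app.quit()

# schedule the refresh ~10 ms later, after the current event has been processed
QTimer.singleShot(10, update_ctx_no_scroll)
app.exec()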
pygpt_net/controller/ctx/ctx.py
CHANGED
@@ -334,17 +334,20 @@ class Ctx:
         if id is not None:
             self.select(id)

-    def update_list(self, reload: bool = False):
+    def update_list(self, reload: bool = False, restore_scroll: bool = False):
         """
         Reload current ctx list

         :param reload: reload ctx list items
+        :param restore_scroll: restore scroll position
         """
         self.window.ui.contexts.ctx_list.update(
             'ctx.list',
             self.window.core.ctx.get_meta(reload),
             expand=False,
         )
+        if restore_scroll:
+            self.window.ui.nodes['ctx.list'].restore_scroll_position()

     def refresh(self, restore_model: bool = True):
         """
@@ -520,7 +523,7 @@
         self.window.core.ctx.clear_current()
         event = RenderEvent(RenderEvent.CLEAR_OUTPUT)
         self.window.dispatch(event)
-        self.
+        self.update_and_restore()

         self.window.controller.ui.tabs.update_title_current("...")

@@ -653,8 +656,7 @@
         if meta is not None:
             meta.important = value
             self.window.core.ctx.save(id)
-            self.
-            self.select_by_current()
+            self.update_and_restore()

     def is_important(self, idx: int) -> bool:
         """
@@ -686,9 +688,15 @@
         self.window.core.ctx.save(id)
         QTimer.singleShot(
             10,
-            lambda: self.
+            lambda: self.update_and_restore()
         )

+    def update_and_restore(self):
+        """Update ctx and restore scroll position"""
+        self.window.ui.nodes['ctx.list'].store_scroll_position()
+        self.update()
+        self.window.ui.nodes['ctx.list'].restore_scroll_position()
+
     def update_name(
             self,
             id: int,
@@ -713,7 +721,7 @@
         self.window.ui.dialog['rename'].close()

         if refresh:
-            self.
+            self.update_and_restore()
         else:
             self.update(reload=True, all=False, no_scroll=True)

@@ -964,7 +972,7 @@
         if update:
             QTimer.singleShot(
                 10,
-                lambda: self.
+                lambda: self.update_and_restore()
             )

     def remove_from_group(self, meta_id):
@@ -977,7 +985,7 @@
         self.group_id = None
         QTimer.singleShot(
             10,
-            lambda: self.
+            lambda: self.update_and_restore()
         )

     def new_group(
@@ -1063,13 +1071,7 @@
         self.window.core.ctx.update_group(group)
         if close:
             self.window.ui.dialog['rename'].close()
-        self.
-            reload=True,
-            all=False,
-            select=False,
-            no_scroll=True
-        )
-        self.select_group(id)
+        self.update_and_restore()

     def get_group_name(self, id: int) -> str:
         """
@@ -1119,7 +1121,7 @@
         self.window.core.ctx.remove_group(group, all=False)
         if self.group_id == id:
             self.group_id = None
-        self.
+        self.update_and_restore()

     def delete_group_all(
             self,
@@ -1144,7 +1146,7 @@
         self.window.core.ctx.remove_group(group, all=True)
         if self.group_id == id:
             self.group_id = None
-        self.
+        self.update_and_restore()

     def reload(self):
         """Reload ctx"""
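Note: update_and_restore() above relies on store_scroll_position() / restore_scroll_position() helpers exposed by the 'ctx.list' widget; the widget-side diff is not included in this excerpt. A hypothetical sketch of the same store/rebuild/restore pattern on a plain Qt list view, with illustrative names and model handling only:

from PySide6.QtGui import QStandardItem, QStandardItemModel
from PySide6.QtWidgets import QApplication, QListView


class ScrollRestoringList(QListView):
    """List view that can rebuild its model without losing the scroll offset."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self._stored_scroll = 0

    def store_scroll_position(self):
        # remember the vertical offset before the model is rebuilt
        self._stored_scroll = self.verticalScrollBar().value()

    def restore_scroll_position(self):
        # re-apply the remembered offset after the model has been repopulated
        self.verticalScrollBar().setValue(self._stored_scroll)


if __name__ == "__main__":
    app = QApplication([])
    view = ScrollRestoringList()
    model = QStandardItemModel()
    for i in range(200):
        model.appendRow(QStandardItem(f"ctx {i}"))
    view.setModel(model)
    view.show()

    # refresh the items without jumping back to the top
    view.store_scroll_position()
    model.clear()
    for i in range(200):
        model.appendRow(QStandardItem(f"ctx {i} (updated)"))
    view.restore_scroll_position()

    app.exec()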
pygpt_net/core/agents/runner.py
CHANGED
@@ -105,6 +105,25 @@ class Runner:
         self.window.core.agents.tools.context = context
         self.window.core.agents.tools.agent_idx = vector_store_idx

+        # --- ADDITIONAL CONTEXT ---
+        # append additional context from RAG if available
+        if vector_store_idx and self.window.core.config.get("agent.idx.auto_retrieve", True):
+            ad_context = self.window.core.idx.chat.query_retrieval(
+                query=prompt,
+                idx=vector_store_idx,
+                model=context.model,
+            )
+            if ad_context:
+                to_append = ""
+                if ctx.hidden_input is None:
+                    ctx.hidden_input = ""
+                if not ctx.hidden_input: # may be not empty (appended before from attachments)
+                    to_append = "ADDITIONAL CONTEXT:"
+                    ctx.hidden_input += to_append
+                to_append += "\n" + ad_context
+                ctx.hidden_input += to_append
+                prompt += "\n\n" + to_append
+
         tools = self.window.core.agents.tools.prepare(context, extra, force=True)
         function_tools = self.window.core.agents.tools.get_function_tools(ctx, extra, force=True)
         plugin_tools = self.window.core.agents.tools.get_plugin_tools(context, extra, force=True)
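Note: a condensed, hypothetical restatement of the auto-retrieve step above. retrieve_from_index() stands in for window.core.idx.chat.query_retrieval(), and unlike the real code (which mutates ctx.hidden_input in place) this helper returns the new values:

from typing import Optional, Tuple


def retrieve_from_index(query: str, idx: str) -> str:
    # stand-in for the real RAG query against the selected vector store index
    return f"(context retrieved from index '{idx}' for: {query})"


def append_additional_context(
    prompt: str,
    hidden_input: Optional[str],
    idx: Optional[str],
    auto_retrieve: bool = True,
) -> Tuple[str, Optional[str]]:
    """Append RAG context to the prompt and hidden input, mirroring the runner logic."""
    if not idx or not auto_retrieve:
        return prompt, hidden_input
    ad_context = retrieve_from_index(prompt, idx)
    if not ad_context:
        return prompt, hidden_input
    if not hidden_input:
        hidden_input = ""
    # add the header only if nothing was appended earlier (e.g. from attachments)
    header = "" if hidden_input else "ADDITIONAL CONTEXT:"
    block = header + "\n" + ad_context
    hidden_input += block
    prompt += "\n\n" + block
    return prompt, hidden_input


new_prompt, hidden = append_additional_context("What changed in 2.6.10?", None, "base")
print(new_prompt)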
pygpt_net/core/agents/tools.py
CHANGED
@@ -25,6 +25,13 @@ from pygpt_net.item.ctx import CtxItem


 class Tools:
+
+    QUERY_ENGINE_TOOL_NAME = "rag_get_context"
+    QUERY_ENGINE_TOOL_DESCRIPTION = "Get additional context for provided question. Use this whenever you need additional context to provide an answer. "
+    QUERY_ENGINE_TOOL_SPEC = ("**"+QUERY_ENGINE_TOOL_NAME+"**: "
+                              + QUERY_ENGINE_TOOL_DESCRIPTION +
+                              "available params: {'query': {'type': 'string', 'description': 'query string'}}, required: [query]")
+
     def __init__(self, window=None):
         """
         Agent tools
@@ -62,6 +69,32 @@ class Tools:
         plugin_functions = self.get_plugin_functions(context.ctx, verbose=verbose, force=force)
         tools.extend(plugin_functions)

+        # add query engine tool if idx is provided
+        query_engine_tools = self.get_retriever_tool(
+            context=context,
+            extra=extra,
+            verbose=verbose,
+        )
+        if query_engine_tools:
+            tools.extend(query_engine_tools)
+        return tools
+
+    def get_retriever_tool(
+            self,
+            context: BridgeContext,
+            extra: Dict[str, Any],
+            verbose: bool = False
+    ) -> List[BaseTool]:
+        """
+        Prepare tools for agent
+
+        :param context: BridgeContext
+        :param extra: extra data
+        :param verbose: verbose mode
+        :return: list of tools
+        """
+        tool = None
+
         # add query engine tool if idx is provided
         idx = extra.get("agent_idx", None)
         if idx is not None and idx != "_":
@@ -69,19 +102,51 @@ class Tools:
             index = self.window.core.idx.storage.get(idx, llm, embed_model) # get index
             if index is not None:
                 query_engine = index.as_query_engine(similarity_top_k=3)
-
+                tool = [
                     QueryEngineTool(
                         query_engine=query_engine,
                         metadata=ToolMetadata(
-                            name=
-                            description=
-                                "Provides additional context and access to the indexed documents."
-                            ),
+                            name=self.QUERY_ENGINE_TOOL_NAME,
+                            description=self.QUERY_ENGINE_TOOL_DESCRIPTION,
                         ),
                     ),
                 ]
-
-
+        return tool
+
+    def get_openai_retriever_tool(
+            self,
+            idx: str,
+            verbose: bool = False
+    ) -> OpenAIFunctionTool:
+        """
+        Prepare OpenAI retriever tool for agent
+
+        :param idx: index name
+        :param verbose: verbose mode
+        :return: OpenAIFunctionTool instance
+        """
+        async def run_function(run_ctx: RunContextWrapper[Any], args: str) -> str:
+            name = run_ctx.tool_name
+            print("[Plugin] Tool call: " + name + " with args: " + str(args))
+            cmd = {
+                "cmd": name,
+                "params": json.loads(args) # args should be a JSON string
+            }
+            return self.tool_exec(name, cmd["params"])
+
+        schema = {"type": "object", "properties": {
+            "query": {
+                "type": "string",
+                "description": "The query string to search in the index."
+            }
+        }, "additionalProperties": False}
+        description = self.QUERY_ENGINE_TOOL_DESCRIPTION + f" Index: {idx}"
+        return OpenAIFunctionTool(
+            name=self.QUERY_ENGINE_TOOL_NAME,
+            description=description,
+            params_json_schema=schema,
+            on_invoke_tool=run_function,
+        )

     def get_plugin_functions(
             self,
@@ -204,6 +269,11 @@ class Tools:
                 tools.append(tool)
             except Exception as e:
                 print(e)
+
+        # append query engine tool if idx is provided
+        if self.agent_idx is not None and self.agent_idx != "_":
+            tools.append(self.get_openai_retriever_tool(self.agent_idx))
+
         return tools

     def get_plugin_tools(
@@ -230,7 +300,6 @@ class Tools:
                 continue # skip blacklisted commands

             description = item['desc']
-            schema = json.loads(item['params']) # from JSON to dict

             def make_func(name, description):
                 def func(**kwargs):
@@ -255,9 +324,17 @@ class Tools:
                 print(e)

         # add query engine tool if idx is provided
-        if self.agent_idx is not None and self.agent_idx
-
-
+        if self.agent_idx is not None and self.agent_idx != "_":
+            extra = {
+                "agent_idx": self.agent_idx, # agent index for query engine tool
+            }
+            query_engine_tools = self.get_retriever_tool(
+                context=context,
+                extra=extra,
+                verbose=verbose,
+            )
+            if query_engine_tools:
+                tools["query_engine"] = query_engine_tools[0] # add query engine tool
         return tools


@@ -281,9 +358,7 @@ class Tools:

         # add query engine tool spec if idx is provided
         if self.agent_idx is not None and self.agent_idx != "_":
-            specs.append(
-                "Provides additional context and access to the indexed documents, "
-                "available params: {'query': {'type': 'string', 'description': 'query string'}}, required: [query]")
+            specs.append(self.QUERY_ENGINE_TOOL_SPEC)

         for func in functions:
             try:
@@ -292,8 +367,9 @@ class Tools:
                     continue # skip blacklisted commands
                 description = func['desc']
                 schema = json.loads(func['params']) # from JSON to dict
-
-
+                specs.append(
+                    f"**{name}**: {description}, available params: {schema.get('properties', {})}, required: {schema.get('required', [])}\n"
+                )
             except Exception as e:
                 print(e)
         return specs
@@ -308,7 +384,7 @@ class Tools:
         """
         print("[Plugin] Tool call: " + cmd + " " + str(params))
         # special case for query engine tool
-        if cmd ==
+        if cmd == self.QUERY_ENGINE_TOOL_NAME:
             if "query" not in params:
                 return "Query parameter is required for query_engine tool."
             if self.context is None:
@@ -340,41 +416,6 @@ class Tools:
         )
         return response

-    def get_retriever_tool(
-            self,
-            context: BridgeContext,
-            extra: Dict[str, Any],
-            verbose: bool = False
-    ) -> List[BaseTool]:
-        """
-        Prepare tools for agent
-
-        :param context: BridgeContext
-        :param extra: extra data
-        :param verbose: verbose mode
-        :return: list of tools
-        """
-        tool = None
-        # add query engine tool if idx is provided
-        idx = extra.get("agent_idx", None)
-        if idx is not None and idx != "_":
-            llm, embed_model = self.window.core.idx.llm.get_service_context(model=context.model)
-            index = self.window.core.idx.storage.get(idx, llm, embed_model) # get index
-            if index is not None:
-                query_engine = index.as_query_engine(similarity_top_k=3)
-                tool = [
-                    QueryEngineTool(
-                        query_engine=query_engine,
-                        metadata=ToolMetadata(
-                            name="query_engine",
-                            description=(
-                                "Provides additional context and access to the indexed documents."
-                            ),
-                        ),
-                    ),
-                ]
-        return tool
-
     def export_sources(
             self,
             response: AgentChatResponse
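Note: get_openai_retriever_tool() above appears to build the RAG tool with the FunctionTool type from the OpenAI Agents SDK (imported in this module as OpenAIFunctionTool). A minimal, self-contained sketch under that assumption; fake_rag_lookup() stands in for the index-backed query that the real tool routes through tool_exec():

import asyncio
import json
from typing import Any

from agents import FunctionTool, RunContextWrapper


def fake_rag_lookup(query: str) -> str:
    # stand-in for querying the vector store index
    return f"(retrieved context for: {query})"


async def on_invoke_tool(run_ctx: RunContextWrapper[Any], args: str) -> str:
    # the SDK passes tool arguments as a JSON string
    params = json.loads(args)
    return fake_rag_lookup(params["query"])


rag_tool = FunctionTool(
    name="rag_get_context",
    description="Get additional context for provided question.",
    params_json_schema={
        "type": "object",
        "properties": {
            "query": {"type": "string", "description": "query string"},
        },
        "required": ["query"],
        "additionalProperties": False,
    },
    on_invoke_tool=on_invoke_tool,
)

# quick local check of the invocation path, without running a full agent
print(asyncio.run(on_invoke_tool(None, json.dumps({"query": "release notes"}))))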
pygpt_net/data/config/config.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "__meta__": {
-        "version": "2.6.9",
-        "app.version": "2.6.9",
+        "version": "2.6.10",
+        "app.version": "2.6.10",
         "updated_at": "2025-08-17T00:00:00"
     },
     "access.audio.event.speech": false,
@@ -50,6 +50,7 @@
     "agent.func_call.native": false,
     "agent.goal.notify": false,
     "agent.idx": "base",
+    "agent.idx.auto_retrieve": true,
     "agent.iterations": 3,
     "agent.llama.idx": "base",
     "agent.llama.loop.enabled": false,
pygpt_net/data/config/settings.json
CHANGED
@@ -1972,6 +1972,20 @@
         "advanced": false,
         "tab": "general"
     },
+    "agent.idx.auto_retrieve": {
+        "section": "agent",
+        "type": "bool",
+        "slider": false,
+        "label": "settings.agent.idx.auto_retrieve",
+        "description": "settings.agent.idx.auto_retrieve.desc",
+        "value": true,
+        "min": 0,
+        "max": 0,
+        "multiplier": 1,
+        "step": 1,
+        "advanced": false,
+        "tab": "general"
+    },
     "agent.mode": {
         "section": "agent",
         "type": "combo",
pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -1003,6 +1003,8 @@ settings.agent.func_call.native = Verwende native API-Funktionsaufrufe
 settings.agent.func_call.native.desc = Wenn aktiviert, wird die Anwendung native API-Funktionsaufrufe anstelle des internen pygpt-Formats verwenden und Befehle werden nicht verwendet. Nur autonomer Agentenmodus.
 settings.agent.goal.notify = Zeige eine Benachrichtigung im Infobereich an, wenn das Ziel erreicht ist.
 settings.agent.idx = Index
+settings.agent.idx.auto_retrieve = Automatisches Abrufen des zusätzlichen Kontexts von RAG
+settings.agent.idx.auto_retrieve.desc = Automatisches Abrufen des zusätzlichen Kontexts von RAG zu Beginn, wenn der Index bereitgestellt wird.
 settings.agent.idx.desc = Nur wenn der interne Modus llama_index ist (Chat mit Dateien), wählen Sie den Index zur Verwendung im Agent- und Expertenmodus
 settings.agent.llama.append_eval = Fügen Sie die vorherige Evaluierungs-Prompt zu der nächsten Evaluierungs-Prompt hinzu und vergleichen Sie sie
 settings.agent.llama.append_eval.desc = Wenn aktiviert, wird die vorherige Verbesserungs-Prompt in der nächsten Evaluierung in der Schleife überprüft
pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -1009,6 +1009,8 @@ settings.agent.func_call.native = Use native API function calls
 settings.agent.func_call.native.desc = If enabled, the application will use native API function calls instead of the internal pygpt format and the command prompts will not be used. Autonomus agent mode only.
 settings.agent.goal.notify = Display a tray notification when the goal is achieved.
 settings.agent.idx = Index to use
+settings.agent.idx.auto_retrieve = Auto retrieve additional context from RAG
+settings.agent.idx.auto_retrieve.desc = Auto retrieve additional context from RAG at the beginning if the index is provided.
 settings.agent.idx.desc = Only if sub-mode is Chat with Files, choose the index to use in Autonomous and Experts modes
 settings.agent.llama.append_eval = Append and compare previous evaluation prompt in next evaluation
 settings.agent.llama.append_eval.desc = If enabled, previous improvement prompt will be checked in next eval in loop
pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -1004,6 +1004,8 @@ settings.agent.func_call.native = Usar llamadas de función API nativas
 settings.agent.func_call.native.desc = Si está habilitado, la aplicación utilizará llamadas de función API nativas en lugar del formato interno pygpt y los comandos no se utilizarán. Solo modo Agente autónomo.
 settings.agent.goal.notify = Mostrar una notificación en la bandeja del sistema cuando se logre el objetivo.
 settings.agent.idx = Índice
+settings.agent.idx.auto_retrieve = Recuperación automática de contexto adicional de RAG
+settings.agent.idx.auto_retrieve.desc = Recuperación automática de contexto adicional de RAG al principio si se proporciona el índice.
 settings.agent.idx.desc = Solo si el modo interno es llama_index (Chat con archivos), elija el índice a utilizar en los modos Agente y Experto
 settings.agent.llama.append_eval = Añadir y comparar la evaluación anterior en la próxima evaluación
 settings.agent.llama.append_eval.desc = Si está habilitado, se verificará el prompt de mejora anterior en la próxima evaluación en bucle
pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -1003,6 +1003,8 @@ settings.agent.func_call.native = Utiliser des appels de fonction API natifs
 settings.agent.func_call.native.desc = Si activé, l'application utilisera des appels de fonction API natifs au lieu du format interne pygpt et les commandes ne seront pas utilisées. Mode Agent autonome uniquement.
 settings.agent.goal.notify = Afficher une notification dans la barre des tâches lorsque l'objectif est atteint.
 settings.agent.idx = Index
+settings.agent.idx.auto_retrieve = Récupération automatique du contexte supplémentaire de RAG
+settings.agent.idx.auto_retrieve.desc = Récupération automatique du contexte supplémentaire de RAG au début si l'index est fourni.
 settings.agent.idx.desc = Seulement si le mode interne est llama_index (Chat avec des fichiers), choisissez l'index à utiliser en modes Agent et Expert
 settings.agent.llama.append_eval = Ajouter et comparer l'évaluation précédente dans la prochaine évaluation
 settings.agent.llama.append_eval.desc = Si activé, le prompt d'amélioration précédent sera vérifié dans la prochaine évaluation en boucle
pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -1003,6 +1003,8 @@ settings.agent.func_call.native = Usa chiamate di funzione API native
 settings.agent.func_call.native.desc = Se abilitato, l'applicazione utilizzerà chiamate di funzione API native invece del formato interno pygpt e i comandi non saranno utilizzati. Solo modalità Agente autonomo.
 settings.agent.goal.notify = Visualizza una notifica nella barra delle applicazioni quando l'obiettivo è raggiunto.
 settings.agent.idx = Indice
+settings.agent.idx.auto_retrieve = Recupero automatico del contesto aggiuntivo da RAG
+settings.agent.idx.auto_retrieve.desc = Recupero automatico del contesto aggiuntivo da RAG all'inizio, se l'indice è fornito.
 settings.agent.idx.desc = Solo se la modalità interna è llama_index (Chat con file), scegli l'indice da utilizzare nelle modalità Agente ed Esperto
 settings.agent.llama.append_eval = Aggiungi e confronta il prompt di valutazione precedente nella prossima valutazione
 settings.agent.llama.append_eval.desc = Se abilitato, il prompt di miglioramento precedente sarà controllato nella prossima valutazione in loop
pygpt_net/data/locale/locale.pl.ini
CHANGED
@@ -972,7 +972,7 @@ preset.research = Badania
 preset.tab.experts = Eksperci
 preset.tab.general = Ogólne
 preset.tab.personalize = Personalizuj
-preset.tab.remote_tools = Narzędzia zdalne
+preset.tab.remote_tools = Narzędzia zdalne
 preset.temperature = Temperatura
 preset.tool.function = Funkcje
 preset.tool.function.tip.agent_llama = Tip: Funkcje z wtyczek są automatycznie włączane.
@@ -1006,6 +1006,8 @@ settings.agent.func_call.native = Używaj natywnych wywołań funkcji API
 settings.agent.func_call.native.desc = Jeśli włączone, aplikacja będzie używać natywnych wywołań funkcji API zamiast wewnętrznego formatu pygpt i polecenia nie będą używane. Tylko tryb Agent autonomiczny.
 settings.agent.goal.notify = Wyświetl powiadomienie w zasobniku systemowym, gdy cel zostanie osiągnięty.
 settings.agent.idx = Indeks
+settings.agent.idx.auto_retrieve = Automatyczne pobieranie dodatkowego kontekstu z RAG
+settings.agent.idx.auto_retrieve.desc = Automatyczne pobieranie dodatkowego kontekstu z RAG na początku, jeśli został podany indeks.
 settings.agent.idx.desc = Tylko jeśli tryb wewnętrzny to llama_index (Czat z plikami), wybierz indeks do użycia w trybach Agenta i Experta
 settings.agent.llama.append_eval = Dodaj i porównaj poprzedni prompt oceny w następnej ocenie
 settings.agent.llama.append_eval.desc = Jeśli włączone, poprzedni prompt ulepszenia będzie sprawdzany w kolejnej ewaluacji w pętli
pygpt_net/data/locale/locale.uk.ini
CHANGED
@@ -1003,6 +1003,8 @@ settings.agent.func_call.native = Використовувати нативні
 settings.agent.func_call.native.desc = Якщо увімкнено, додаток буде використовувати нативні виклики функцій API замість внутрішнього формату pygpt і команди не будуть використовуватися. Тільки режим автономного Агента.
 settings.agent.goal.notify = Показати сповіщення в системному треї, коли мета досягнута.
 settings.agent.idx = Індекс
+settings.agent.idx.auto_retrieve = Автоматичне отримання додаткового контексту з RAG
+settings.agent.idx.auto_retrieve.desc = Автоматичне отримання додаткового контексту з RAG на початку, якщо надано індекс.
 settings.agent.idx.desc = Тільки якщо внутрішній режим llama_index (Чат з файлами), виберіть індекс для використання у режимах Агента та Експерта
 settings.agent.llama.append_eval = Додати та порівняти попередній запит оцінки при наступному оцінюванні
 settings.agent.llama.append_eval.desc = Якщо увімкнено, попередній запит на поліпшення буде перевірено в наступній оцінці в циклі
pygpt_net/data/locale/locale.zh.ini
CHANGED
@@ -1003,6 +1003,8 @@ settings.agent.func_call.native = 使用本地API函数调用
 settings.agent.func_call.native.desc = 如果启用，应用程序将使用本地API函数调用而不是内部pygpt格式，并且命令将不被使用。仅限自主Agent模式。
 settings.agent.goal.notify = 當目標達成時顯示托盤通知。
 settings.agent.idx = 要使用的索引
+settings.agent.idx.auto_retrieve = 自动检索来自RAG的附加上下文
+settings.agent.idx.auto_retrieve.desc = 如果提供了索引，将在开始时自动检索来自RAG的附加上下文。
 settings.agent.idx.desc = 仅当内部模式为 llama_index（文件聊天）时，选择在代理模式和专家模式下使用的索引
 settings.agent.llama.append_eval = 在下一次评估中追加并比较先前的评估提示
 settings.agent.llama.append_eval.desc = 如果启用，将在下一个循环评估中检查先前的改进提示