pygpt-net 2.6.15__py3-none-any.whl → 2.6.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +6 -0
- pygpt_net/__init__.py +1 -1
- pygpt_net/controller/chat/command.py +18 -6
- pygpt_net/controller/plugins/plugins.py +31 -15
- pygpt_net/controller/presets/editor.py +11 -32
- pygpt_net/core/agents/observer/evaluation.py +3 -14
- pygpt_net/core/agents/runners/llama_workflow.py +9 -6
- pygpt_net/core/command/command.py +5 -3
- pygpt_net/core/experts/experts.py +58 -13
- pygpt_net/core/plugins/plugins.py +12 -1
- pygpt_net/data/config/config.json +5 -5
- pygpt_net/data/config/models.json +2 -2
- pygpt_net/data/locale/locale.en.ini +2 -2
- pygpt_net/data/locale/plugin.openai_dalle.de.ini +1 -1
- pygpt_net/data/locale/plugin.openai_dalle.en.ini +1 -1
- pygpt_net/data/locale/plugin.openai_dalle.es.ini +1 -1
- pygpt_net/data/locale/plugin.openai_dalle.fr.ini +1 -1
- pygpt_net/data/locale/plugin.openai_dalle.it.ini +1 -1
- pygpt_net/data/locale/plugin.openai_dalle.pl.ini +1 -1
- pygpt_net/data/locale/plugin.openai_dalle.uk.ini +1 -1
- pygpt_net/data/locale/plugin.openai_dalle.zh.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.de.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.en.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.es.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.fr.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.it.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.pl.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.uk.ini +1 -1
- pygpt_net/data/locale/plugin.openai_vision.zh.ini +1 -1
- pygpt_net/item/ctx.py +5 -4
- pygpt_net/plugin/idx_llama_index/plugin.py +9 -5
- pygpt_net/plugin/idx_llama_index/worker.py +5 -2
- pygpt_net/plugin/openai_dalle/plugin.py +1 -1
- pygpt_net/tools/translator/ui/dialogs.py +1 -0
- pygpt_net/tools/translator/ui/widgets.py +1 -2
- pygpt_net/ui/dialog/plugins.py +6 -4
- pygpt_net/ui/widget/textarea/editor.py +1 -2
- {pygpt_net-2.6.15.dist-info → pygpt_net-2.6.16.dist-info}/METADATA +18 -12
- {pygpt_net-2.6.15.dist-info → pygpt_net-2.6.16.dist-info}/RECORD +42 -42
- {pygpt_net-2.6.15.dist-info → pygpt_net-2.6.16.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.15.dist-info → pygpt_net-2.6.16.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.15.dist-info → pygpt_net-2.6.16.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
pygpt_net/__init__.py
CHANGED
@@ -13,7 +13,7 @@ __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.6.15"
+__version__ = "2.6.16"
 __build__ = "2025-08-20"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
pygpt_net/controller/chat/command.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.20 09:00:00 #
 # ================================================== #

 import copy

@@ -88,6 +88,10 @@ class Command:
         else:
             reply.type = ReplyContext.CMD_EXECUTE_INLINE

+        # force call (experts, internal, etc.)
+        if internal and ctx.force_call:
+            reply.type = ReplyContext.CMD_EXECUTE
+
         data = {
             "meta": ctx.meta,
         }

@@ -101,10 +105,19 @@ class Command:
         if internal:
             ctx.agent_call = True
             if reply.type == ReplyContext.CMD_EXECUTE:
-
-
-
-
+                if ctx.force_call:
+                    # force call, execute all commands
+                    self.window.controller.plugins.apply_cmds(
+                        reply.ctx,
+                        reply.cmds,
+                        all=True,
+                        execute_only=True,
+                    )
+                else:
+                    self.window.controller.plugins.apply_cmds(
+                        reply.ctx,
+                        reply.cmds,
+                    )
             elif reply.type == ReplyContext.CMD_EXECUTE_INLINE:
                 self.window.controller.plugins.apply_cmds_inline(
                     reply.ctx,

@@ -114,7 +127,6 @@ class Command:
         else:
             # force call
             if ctx.force_call:
-                #ctx.agent_call = True
                 self.window.controller.plugins.apply_cmds(
                     reply.ctx,
                     reply.cmds,
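
Note: a condensed sketch of the routing introduced above, not code from the package. It assumes the branch preceding the first hunk (not shown here) picks CMD_EXECUTE when command execution is enabled; the ReplyContext constants and the plugins controller are replaced with plain stand-ins.

    CMD_EXECUTE = "cmd_execute"                # stand-in for ReplyContext.CMD_EXECUTE
    CMD_EXECUTE_INLINE = "cmd_execute_inline"  # stand-in for ReplyContext.CMD_EXECUTE_INLINE

    def pick_reply_type(cmd_enabled: bool, internal: bool, force_call: bool) -> str:
        # assumed: inline path unless command execution is enabled
        reply_type = CMD_EXECUTE if cmd_enabled else CMD_EXECUTE_INLINE
        # new in 2.6.16: forced internal calls (experts, internal, etc.) always execute
        if internal and force_call:
            reply_type = CMD_EXECUTE
        return reply_type

    def dispatch_flags(reply_type: str, force_call: bool) -> dict:
        # mirrors the new branch: a forced call runs all commands, execute-only
        if reply_type == CMD_EXECUTE and force_call:
            return {"all": True, "execute_only": True}
        if reply_type == CMD_EXECUTE:
            return {"all": False, "execute_only": False}
        return {"inline": True}

    print(pick_reply_type(cmd_enabled=False, internal=True, force_call=True))  # cmd_execute
    print(dispatch_flags("cmd_execute", force_call=True))  # {'all': True, 'execute_only': True}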
pygpt_net/controller/plugins/plugins.py
CHANGED

@@ -9,7 +9,7 @@
 # Updated Date: 2025.08.15 23:00:00 #
 # ================================================== #

-from typing import List, Dict, Any
+from typing import List, Dict, Any, Optional

 from PySide6.QtGui import QAction

@@ -37,9 +37,11 @@ class Plugins:
         self._suspend_updates = 0

     def _begin_batch(self):
+        """Begin batch updates"""
         self._suspend_updates += 1

     def _end_batch(self):
+        """End batch updates"""
         if self._suspend_updates > 0:
             self._suspend_updates -= 1
             if self._suspend_updates == 0:

@@ -85,7 +87,7 @@ class Plugins:
         pm = self.window.core.plugins
         ui_menu = self.window.ui.menu
         menu_plugins = ui_menu['plugins']
-        for pid in pm.get_ids():
+        for pid in pm.get_ids(sort=True):
             if pid in menu_plugins:
                 continue
             name = pm.get_name(pid)

@@ -185,7 +187,6 @@ class Plugins:

         :param id: plugin id
         :return: True if enabled
-        :rtype: bool
         """
         return self.window.core.plugins.is_registered(id) and self.enabled.get(id, False)

@@ -213,7 +214,7 @@ class Plugins:
         """
         pm = self.window.core.plugins
         plugin_idx = 0
-        for pid in pm.get_ids():
+        for pid in pm.get_ids(sort=True):
             if pm.has_options(pid):
                 if plugin_idx == idx:
                     self.settings.current_plugin = pid

@@ -222,15 +223,15 @@ class Plugins:
         current = self.window.ui.models['plugin.list'].index(idx, 0)
         self.window.ui.nodes['plugin.list'].setCurrentIndex(current)

-    def get_tab_idx(self, plugin_id: str) -> int:
+    def get_tab_idx(self, plugin_id: str) -> Optional[int]:
         """
         Get plugin tab index

         :param plugin_id: plugin id
-        :return: tab index
+        :return: tab index or None if not found
         """
         pm = self.window.core.plugins
-        for i, pid in enumerate(pm.get_ids()):
+        for i, pid in enumerate(pm.get_ids(sort=True)):
             if pid == plugin_id:
                 return i
         return None

@@ -262,6 +263,7 @@ class Plugins:
     def has_type(self, id: str, type: str):
         """
         Check if plugin has type
+
         :param id: plugin ID
         :param type: type to check
         :return: True if has type

@@ -342,7 +344,19 @@ class Plugins:
         cmds: List[Dict[str, Any]],
         all: bool = False,
         execute_only: bool = False
-    ):
+    ) -> Optional[List[Any]]:
+        """
+        Common method to apply commands
+
+        This method is used for both inline and non-inline commands.
+
+        :param event_type: name of the event type, either Event.CMD_EXECUTE or Event.CMD_INLINE
+        :param ctx: CtxItem
+        :param cmds: commands list, each command is a dictionary with keys like "cmd", "args", etc.
+        :param all: True to apply all commands, False to apply only enabled commands
+        :param execute_only: True to execute commands only, without any additional event
+        :return: results: results of the command execution, if any (ctx.results)
+        """
         commands = self.window.core.command.from_commands(cmds)
         if len(commands) == 0:
             return

@@ -375,14 +389,15 @@ class Plugins:
         cmds: List[Dict[str, Any]],
         all: bool = False,
         execute_only: bool = False
-    ):
+    ) -> Optional[List[Any]]:
         """
-        Apply commands
+        Apply commands (CMD_EXECUTE event only)

         :param ctx: CtxItem
         :param cmds: commands list
         :param all: True to apply all commands, False to apply only enabled commands
         :param execute_only: True to execute commands only, without any additional event
+        :return: results: results of the command execution, if any (ctx.results)
         """
         return self._apply_cmds_common(Event.CMD_EXECUTE, ctx, cmds, all=all, execute_only=execute_only)

@@ -390,13 +405,13 @@ class Plugins:
         self,
         ctx: CtxItem,
         cmds: List[Dict[str, Any]]
-    ):
+    ) -> Optional[List[Any]]:
         """
         Apply all commands (inline or not)

-        :param ctx:
-        :param cmds: commands
-        :return: results
+        :param ctx: CtxItem
+        :param cmds: commands list
+        :return: results: results of the command execution, if any (ctx.results)
         """
         if self.window.core.config.get("cmd"):
             return self.apply_cmds(ctx, cmds)

@@ -408,12 +423,13 @@ class Plugins:
         self,
         ctx: CtxItem,
         cmds: List[Dict[str, Any]]
-    ):
+    ) -> Optional[List[Any]]:
         """
         Apply inline commands

         :param ctx: CtxItem
         :param cmds: commands list
+        :return: results: results of the command execution, if any (ctx.results)
         """
         return self._apply_cmds_common(Event.CMD_INLINE, ctx, cmds)
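
Note: the Optional[List[Any]] annotations added above tighten the return contract of apply_cmds() and apply_cmds_inline(): None when no commands are parsed from the input, otherwise the collected results (ctx.results). A minimal, self-contained sketch of that contract; the parsing and execution below are stand-ins, not the real from_commands() or event dispatch.

    from typing import Any, Dict, List, Optional

    def apply_cmds_sketch(cmds: List[Dict[str, Any]]) -> Optional[List[Any]]:
        commands = [c for c in cmds if "cmd" in c]  # stand-in for command.from_commands()
        if len(commands) == 0:
            return None  # nothing parsed, nothing to execute
        return ["executed: " + c["cmd"] for c in commands]  # stand-in for ctx.results

    print(apply_cmds_sketch([]))  # None
    print(apply_cmds_sketch([{"cmd": "get_context", "params": {"query": "docs"}}]))  # ['executed: get_context']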
pygpt_net/controller/presets/editor.py
CHANGED

@@ -258,11 +258,7 @@ class Editor:
         self.window.ui.add_hook("update.preset.agent_provider_openai", self.hook_update)

     def toggle_extra_options(self):
-        """
-        Toggle extra options in preset editor
-
-        :return: None
-        """
+        """Toggle extra options in preset editor"""
         if not self.tab_options_idx:
             return
         mode = self.window.core.config.get('mode')

@@ -285,11 +281,7 @@ class Editor:
         self.toggle_extra_options_by_provider()

     def toggle_extra_options_by_provider(self):
-        """
-        Toggle extra options in preset editor by provider
-
-        :return: None
-        """
+        """Toggle extra options in preset editor by provider"""
         if not self.tab_options_idx:
             # show base prompt
             self.window.ui.tabs['preset.editor.extra'].setTabVisible(0, True)

@@ -398,11 +390,7 @@ class Editor:
         )

     def load_extra_defaults(self):
-        """
-        Load extra options defaults for preset editor
-
-        :return: None
-        """
+        """Load extra options defaults for preset editor"""
         if not self.tab_options_idx:
             return
         mode = self.window.core.config.get('mode')

@@ -431,11 +419,7 @@ class Editor:
         )

     def load_extra_defaults_current(self):
-        """
-        Load extra options defaults on mode change
-
-        :return: None
-        """
+        """Load extra options defaults on mode change"""
         if not self.tab_options_idx:
             return

@@ -491,9 +475,8 @@ class Editor:

     def append_extra_options(self, preset: PresetItem):
         """
-
+        Append extra options for preset editor

-        :param id: preset id
         :param preset: preset item
         """
         mode = self.window.core.config.get('mode')

@@ -536,9 +519,7 @@ class Editor:
             preset.extra[id] = data_dict

     def append_extra_config(self):
-        """
-        Build extra configuration for the preset editor dialog
-        """
+        """Build extra configuration for the preset editor dialog"""
         if self.built:
             return

@@ -592,11 +573,7 @@ class Editor:
         self.built = True

     def append_default_prompt(self):
-        """
-        Append default prompt to the preset editor
-
-        :return: None
-        """
+        """Append default prompt to the preset editor"""
         mode = self.window.core.config.get('mode')
         if mode not in [MODE_AGENT_OPENAI, MODE_AGENT_LLAMA]:
             return

@@ -691,6 +668,7 @@ class Editor:

         if id is None:
             self.experts.update_list()
+            self.window.ui.config[self.id]['idx'].set_value("_") # reset idx combo if new preset

         if id is not None and id != "":
             if id in self.window.core.presets.items:

@@ -769,6 +747,7 @@ class Editor:

         # set focus to name field
         current_model = self.window.core.config.get('model')
+
         # set current model in combo box as selected
         if id is None:
             self.window.ui.config[self.id]['model'].set_value(current_model)

@@ -784,7 +763,7 @@ class Editor:
         close: bool = True
     ):
         """
-        Save
+        Save preset

         :param force: force overwrite file
         :param close: close dialog

@@ -900,7 +879,7 @@ class Editor:
         if not is_new:
             no_scroll = True
         self.window.core.presets.save(id)
-        self.window.controller.presets.refresh()
+        self.window.controller.presets.refresh(no_scroll=no_scroll)

         # close dialog
         if close:
pygpt_net/core/agents/observer/evaluation.py
CHANGED

@@ -188,24 +188,13 @@ class Evaluation:
         """
         outputs = []
         for ctx in history:
-            if self.is_output(ctx):
-                if ctx.output:
-                    outputs.append(ctx.output)
-
             # if next input then clear outputs - use only output after last user input
             if self.is_input(ctx):
                 outputs.clear()

-
-
-
-            if self.is_output(ctx):
-                if ctx.output:
-                    outputs.append(ctx.output)
-
-            # if next input then clear outputs - use only output after last user input
-            if self.is_input(ctx):
-                outputs.clear()
+            if self.is_output(ctx):
+                if ctx.output:
+                    outputs.append(ctx.output)

         return "\n\n".join(outputs) if outputs else ""
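
Note: the hunk above removes an accidentally duplicated block; the surviving loop keeps only outputs produced after the most recent user input. An illustrative, runnable stand-in with plain dicts in place of context items:

    history = [
        {"input": "plan a trip", "output": None},
        {"input": None, "output": "Here is a draft plan."},
        {"input": "make it shorter", "output": None},
        {"input": None, "output": "Short plan: ..."},
    ]

    outputs = []
    for ctx in history:
        if ctx["input"]:       # next user input -> start collecting from scratch
            outputs.clear()
        if ctx["output"]:
            outputs.append(ctx["output"])

    print("\n\n".join(outputs) if outputs else "")  # prints only "Short plan: ..."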
pygpt_net/core/agents/runners/llama_workflow.py
CHANGED

@@ -240,6 +240,8 @@ class LlamaWorkflow(BaseRunner):
         item_ctx.output = "" # empty to prevent render
         item_ctx.stream = "" # for stream

+        print("RUN AGENT!!!!!!!!!!!!!!!!!!!!")
+
         async for event in handler.stream_events():
             if self.is_stopped():
                 # persist current output on stop

@@ -275,12 +277,13 @@ class LlamaWorkflow(BaseRunner):
                 if verbose:
                     print("\n\n-----STEP-----\n\n")
                     print(f"[{event.name}] {event.index}/{event.total} meta={event.meta}")
-
-                item_ctx
-
-
-
-
+                if flush:
+                    item_ctx = self.on_next_ctx(
+                        item_ctx,
+                        signals=signals,
+                        begin=begin,
+                        stream=True,
+                    )
             elif isinstance(event, AgentStream):
                 if verbose:
                     print(f"{event.delta}", end="", flush=True)
pygpt_net/core/command/command.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.20 09:00:00 #
 # ================================================== #

 import copy

@@ -574,11 +574,12 @@ class Command:
                 pass
         return params

-    def is_native_enabled(self, force: bool = False) -> bool:
+    def is_native_enabled(self, force: bool = False, model: str = None) -> bool:
         """
         Check if native tool calls are enabled

         :param force: force check, ignore config
+        :param model: model name (optional)
         :return: True if enabled
         """
         disabled_modes = [

@@ -592,7 +593,8 @@ class Command:
             return False

         if not force:
-            model
+            if model is None:
+                model = self.window.core.config.get('model') # get from globals
             if model:
                 model_data = self.window.core.models.get(model)
                 if model_data:
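
Note: the new model argument lets callers (such as the expert worker later in this diff) check native tool-call support for a specific model instead of the globally selected one. A minimal sketch of the fallback order only; the config lookup is a plain dict here, not the real config object, and the model names are example values.

    from typing import Optional

    def resolve_model(explicit: Optional[str], config: dict) -> Optional[str]:
        model = explicit
        if model is None:
            model = config.get("model")  # fall back to the global setting, as before
        return model

    config = {"model": "gpt-4o"}                 # example value
    print(resolve_model(None, config))           # gpt-4o (global fallback)
    print(resolve_model("gpt-4o-mini", config))  # gpt-4o-mini (explicit argument wins)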
pygpt_net/core/experts/experts.py
CHANGED

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.20 09:00:00 #
 # ================================================== #

 import json

@@ -53,6 +53,7 @@ class Experts:
         self.allowed_cmds = ["expert_call"]
         self.worker = None
         self.last_expert_id = None # last expert id used in call
+        self.last_idx = None # last index used in call
         self.master_ctx = None # master meta for expert calls

     def get_mode(self) -> str:

@@ -218,6 +219,28 @@ class Experts:
             return {}
         return calls

+    def extract_tool_calls(self, ctx: CtxItem):
+        """
+        Extract tool calls from expert
+
+        :param ctx: context item
+        """
+        for call in ctx.tool_calls:
+            if (call["type"] == "function"
+                    and "function" in call
+                    and call["function"]["name"] == "get_context"):
+                ctx.force_call = True # force call if get_context tool is used
+                ctx.cmds_before = [
+                    {
+                        "cmd": "get_context",
+                        "params": {
+                            "query": call["function"]["arguments"]["query"],
+                            "idx": self.last_idx,
+                        },
+                    }
+                ]
+                break
+
     def reply(self, ctx: CtxItem):
         """
         Re-send response from commands to master expert

@@ -360,6 +383,9 @@ class Experts:
             ctx.extra = {}
         ctx.extra["tool_calls"] = ctx.tool_calls

+        # if 'get_context' tool is used then force call, and append idx
+        self.extract_tool_calls(ctx) # extract tool calls from ctx
+
         self.window.controller.chat.command.handle(ctx, internal=True) # handle cmds sync
         if ctx.reply:
             self.window.update_status("") # clear status

@@ -385,7 +411,6 @@ class Experts:

         # make copy of ctx for reply, and change input name to expert name
         reply_ctx = CtxItem()
-
         reply_ctx.from_dict(ctx.to_dict())
         reply_ctx.meta = master_ctx.meta

@@ -523,6 +548,25 @@ class Experts:
         ]
         return cmds

+    def get_retriever_tool(self) -> Dict[str, str]:
+        """
+        Get retriever tool for additional context retrieval
+
+        :return: retriever tool definition
+        """
+        return {
+            "cmd": "get_context",
+            "instruction": "get additional context for a given query",
+            "params": [
+                {
+                    "name": "query",
+                    "description": "query to retrieve additional context for",
+                    "required": True,
+                    "type": "str",
+                }
+            ]
+        }
+
     def has_calls(self, ctx: CtxItem) -> bool:
         """
         Check if context has expert calls

@@ -653,6 +697,9 @@ class ExpertWorker(QRunnable):
             use_index = False
             if db_idx and db_idx != '_':
                 use_index = True
+                self.window.core.experts.last_idx = db_idx # store last index used in call
+            else:
+                self.window.core.experts.last_idx = None
             if use_index:
                 index, llm = self.window.core.idx.chat.get_index(db_idx, model_data, stream=False)
             else:

@@ -715,9 +762,16 @@ class ExpertWorker(QRunnable):
                     return
                 else:
                     # native func call
-                    if self.window.core.command.is_native_enabled():
+                    if self.window.core.command.is_native_enabled(force=False, model=model):
+
+                        # get native functions, without expert_call here
                         functions = self.window.core.command.get_functions(master_ctx.id)
-
+
+                        # append retrieval tool if index is selected
+                        if use_index:
+                            retriever_tool = self.window.core.experts.get_retriever_tool()
+                            func_list = self.window.core.command.cmds_to_functions([retriever_tool])
+                            functions.append(func_list[0]) # append only first function

                 # call bridge
                 bridge_context = BridgeContext(

@@ -766,15 +820,7 @@ class ExpertWorker(QRunnable):
                 self.window.core.ctx.update_item(ctx)

                 ctx.from_previous() # append previous result if exists
-
-                # tmp switch meta for render purposes
-                ctx.meta = master_ctx.meta
-
-                if use_agent:
-                    self.signals.output.emit(ctx, mode) # emit output signal, only if final response from agent
-
                 ctx.clear_reply() # reset results
-                ctx.meta = slave # restore before cmd execute

                 if not use_agent:
                     ctx.sub_tool_call = True

@@ -793,7 +839,6 @@ class ExpertWorker(QRunnable):

                 # make copy of ctx for reply, and change input name to expert name
                 reply_ctx = CtxItem()
-
                 reply_ctx.from_dict(ctx.to_dict())
                 reply_ctx.meta = master_ctx.meta
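
Note: taken together, the expert-side changes wire a native get_context tool call back into the command pipeline: the worker stores the selected index in last_idx, advertises the retriever tool when an index is in use, and extract_tool_calls() turns the model's tool call into a forced get_context command. A self-contained sketch of that mapping, with a plain dataclass standing in for CtxItem and "base" as an example index name:

    from dataclasses import dataclass, field
    from typing import Any, Dict, List, Optional

    @dataclass
    class Ctx:  # stand-in for CtxItem
        tool_calls: List[Dict[str, Any]] = field(default_factory=list)
        force_call: bool = False
        cmds_before: List[Dict[str, Any]] = field(default_factory=list)

    def extract_tool_calls(ctx: Ctx, last_idx: Optional[str]) -> None:
        # mirrors Experts.extract_tool_calls(): a native "get_context" call forces
        # a command call and carries the last used index in its params
        for call in ctx.tool_calls:
            if (call["type"] == "function"
                    and "function" in call
                    and call["function"]["name"] == "get_context"):
                ctx.force_call = True
                ctx.cmds_before = [{
                    "cmd": "get_context",
                    "params": {
                        "query": call["function"]["arguments"]["query"],
                        "idx": last_idx,
                    },
                }]
                break

    ctx = Ctx(tool_calls=[{
        "type": "function",
        "function": {"name": "get_context", "arguments": {"query": "release notes"}},
    }])
    extract_tool_calls(ctx, last_idx="base")
    print(ctx.force_call)                # True
    print(ctx.cmds_before[0]["params"])  # {'query': 'release notes', 'idx': 'base'}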
pygpt_net/core/plugins/plugins.py
CHANGED

@@ -57,14 +57,25 @@ class Plugins:
         """
         return self.plugins

-    def get_ids(self) -> List[str]:
+    def get_ids(self, sort: bool = False) -> List[str]:
         """
         Get all plugins ids

+        :param sort: if True, return sorted ids
         :return: plugins ids list
         """
+        if sort:
+            return self.get_sorted_ids()
         return list(self.plugins.keys())

+    def get_sorted_ids(self) -> List[str]:
+        """
+        Get all plugins ids sorted by name
+
+        :return: sorted plugins ids list
+        """
+        return sorted(self.plugins.keys(), key=lambda pid: self.get_name(pid).lower())
+
     def get(self, plugin_id: str) -> Optional[BasePlugin]:
         """
         Get plugin by id
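
Note: a small, runnable illustration of the new sort flag, using example plugin ids and display names rather than the package's real registry: ids keep insertion order by default and are ordered case-insensitively by display name when sort=True.

    from typing import List

    plugins = {  # example ids and display names only
        "openai_dalle": "Image Generation (inline)",
        "cmd_web": "Web Search",
        "audio_input": "Audio Input",
    }

    def get_name(pid: str) -> str:
        return plugins[pid]

    def get_ids(sort: bool = False) -> List[str]:
        if sort:
            return sorted(plugins.keys(), key=lambda pid: get_name(pid).lower())
        return list(plugins.keys())

    print(get_ids())           # ['openai_dalle', 'cmd_web', 'audio_input']
    print(get_ids(sort=True))  # ['audio_input', 'openai_dalle', 'cmd_web']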
pygpt_net/data/config/config.json
CHANGED

@@ -1,7 +1,7 @@
 {
     "__meta__": {
-        "version": "2.6.15",
-        "app.version": "2.6.15",
+        "version": "2.6.16",
+        "app.version": "2.6.16",
         "updated_at": "2025-08-20T00:00:00"
     },
     "access.audio.event.speech": false,

@@ -176,11 +176,11 @@
     "download.dir": "download",
     "experts.func_call.native": false,
     "experts.mode": "chat",
-    "experts.use_agent":
+    "experts.use_agent": true,
     "experts.api_use_responses": false,
     "experts.internal.api_use_responses": false,
-    "font_size":
-    "font_size.input":
+    "font_size": 14,
+    "font_size.input": 14,
     "font_size.ctx": 12,
     "font_size.toolbox": 12,
     "func_call.native": true,
@@ -1147,8 +1147,8 @@ settings.experts.internal.api_use_responses = Use Responses API in Experts (slav
|
|
|
1147
1147
|
settings.experts.internal.api_use_responses.desc = Use Responses API instead of ChatCompletions API for Expert instances (slave models). OpenAI models only.
|
|
1148
1148
|
settings.experts.mode = Sub-mode for experts
|
|
1149
1149
|
settings.experts.mode.desc = Sub-mode to use for Experts
|
|
1150
|
-
settings.experts.use_agent = Use
|
|
1151
|
-
settings.experts.use_agent.desc = If enabled, expert will use the
|
|
1150
|
+
settings.experts.use_agent = Use agent for expert reasoning
|
|
1151
|
+
settings.experts.use_agent.desc = If enabled, expert will use the agent when generating response and calling tools.
|
|
1152
1152
|
settings.font_size = Font size (chat plain-text, notepads)
|
|
1153
1153
|
settings.font_size.ctx = Font size (ctx list)
|
|
1154
1154
|
settings.font_size.input = Font size (input)
|
|
pygpt_net/data/locale/plugin.openai_dalle.de.ini
CHANGED

@@ -1,5 +1,5 @@
 [LOCALE]
 plugin.description = Integriert die DALL-E 3 Bildgenerierung mit jedem Chat und Modus. Einfach aktivieren und im Chatmodus nach einem Bild fragen, unter Verwendung des Standardmodells wie GPT-4. Das Plugin erfordert nicht, dass die Option "Befehle ausführen" aktiviert ist.
-plugin.name =
+plugin.name = Bildgenerierung (inline)
 prompt.description = Prompt verwendet, um im Hintergrund eine Abfrage für DALL-E zu generieren.
 prompt.label = Prompt
pygpt_net/data/locale/plugin.openai_dalle.en.ini
CHANGED

@@ -2,6 +2,6 @@
 model.description = The model used for generating images; default is "dall-e-3".
 model.label = Model
 plugin.description = Integrates DALL-E 3 image generation with any chat and mode. Just enable and ask for an image in Chat mode, using the standard model like GPT-4. The plugin does not require the "+ Tools" option to be enabled.
-plugin.name =
+plugin.name = Image Generation (inline)
 prompt.description = Prompt used for generating a query for DALL-E in the background.
 prompt.label = Prompt
pygpt_net/data/locale/plugin.openai_dalle.es.ini
CHANGED

@@ -1,5 +1,5 @@
 [LOCALE]
 plugin.description = Integra la generación de imágenes de DALL-E 3 con cualquier chat y modo. Solo hay que activarla y pedir una imagen en el Modo Chat, utilizando el modelo estándar como GPT-4. El plugin no requiere que la opción "Ejecutar comandos" esté habilitada.
-plugin.name =
+plugin.name = Generación de Imágenes (en línea)
 prompt.description = Prompt utilizado para generar una consulta para DALL-E en segundo plano.
 prompt.label = Prompt
pygpt_net/data/locale/plugin.openai_dalle.fr.ini
CHANGED

@@ -1,5 +1,5 @@
 [LOCALE]
 plugin.description = Intègre la génération d'images DALL-E 3 avec n'importe quelle discussion et mode. Il suffit de l'activer et de demander une image en mode Chat, en utilisant le modèle standard comme GPT-4. Le plugin ne nécessite pas que l'option "Exécuter des commandes" soit activée.
-plugin.name =
+plugin.name = Génération d'Images (intégré)
 prompt.description = Prompt utilisé pour générer une requête pour DALL-E en arrière-plan.
 prompt.label = Prompt