pygpt-net 2.6.20__py3-none-any.whl → 2.6.21__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- pygpt_net/CHANGELOG.txt +9 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/agent/agent.py +130 -2
- pygpt_net/controller/agent/experts.py +93 -96
- pygpt_net/controller/agent/llama.py +2 -1
- pygpt_net/controller/assistant/assistant.py +18 -1
- pygpt_net/controller/attachment/attachment.py +17 -1
- pygpt_net/controller/camera/camera.py +15 -7
- pygpt_net/controller/chat/chat.py +2 -2
- pygpt_net/controller/chat/common.py +50 -33
- pygpt_net/controller/chat/image.py +67 -77
- pygpt_net/controller/chat/input.py +94 -166
- pygpt_net/controller/chat/output.py +83 -140
- pygpt_net/controller/chat/response.py +83 -102
- pygpt_net/controller/chat/text.py +116 -149
- pygpt_net/controller/ctx/common.py +2 -1
- pygpt_net/controller/ctx/ctx.py +86 -6
- pygpt_net/controller/files/files.py +13 -1
- pygpt_net/controller/idx/idx.py +26 -2
- pygpt_net/controller/kernel/reply.py +53 -66
- pygpt_net/controller/kernel/stack.py +16 -16
- pygpt_net/controller/model/importer.py +2 -1
- pygpt_net/controller/model/model.py +62 -3
- pygpt_net/controller/settings/editor.py +4 -4
- pygpt_net/controller/ui/ui.py +16 -2
- pygpt_net/core/agents/observer/evaluation.py +3 -3
- pygpt_net/core/agents/provider.py +25 -3
- pygpt_net/core/agents/runner.py +4 -1
- pygpt_net/core/agents/runners/llama_workflow.py +19 -7
- pygpt_net/core/agents/runners/loop.py +3 -1
- pygpt_net/core/agents/runners/openai_workflow.py +17 -3
- pygpt_net/core/agents/tools.py +4 -1
- pygpt_net/core/bridge/context.py +34 -37
- pygpt_net/core/ctx/ctx.py +1 -1
- pygpt_net/core/db/database.py +2 -2
- pygpt_net/core/debug/debug.py +12 -1
- pygpt_net/core/dispatcher/dispatcher.py +24 -1
- pygpt_net/core/events/app.py +7 -7
- pygpt_net/core/events/control.py +26 -26
- pygpt_net/core/events/event.py +6 -3
- pygpt_net/core/events/kernel.py +2 -2
- pygpt_net/core/events/render.py +13 -13
- pygpt_net/core/experts/experts.py +76 -82
- pygpt_net/core/experts/worker.py +12 -12
- pygpt_net/core/models/models.py +5 -1
- pygpt_net/core/models/ollama.py +14 -5
- pygpt_net/core/render/web/helpers.py +2 -2
- pygpt_net/core/render/web/renderer.py +4 -4
- pygpt_net/core/types/__init__.py +2 -1
- pygpt_net/core/types/agent.py +4 -4
- pygpt_net/core/types/base.py +19 -0
- pygpt_net/core/types/console.py +6 -6
- pygpt_net/core/types/mode.py +8 -8
- pygpt_net/core/types/multimodal.py +3 -3
- pygpt_net/core/types/openai.py +2 -1
- pygpt_net/data/config/config.json +4 -4
- pygpt_net/data/config/models.json +19 -3
- pygpt_net/data/config/settings.json +14 -14
- pygpt_net/data/locale/locale.en.ini +2 -2
- pygpt_net/item/ctx.py +256 -240
- pygpt_net/item/model.py +59 -116
- pygpt_net/item/preset.py +122 -105
- pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
- pygpt_net/provider/agents/openai/agent.py +4 -12
- pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
- pygpt_net/provider/agents/openai/agent_planner.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
- pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
- pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
- pygpt_net/provider/agents/openai/evolve.py +5 -9
- pygpt_net/provider/agents/openai/supervisor.py +4 -8
- pygpt_net/provider/core/config/patch.py +10 -3
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
- pygpt_net/provider/core/model/patch.py +11 -1
- pygpt_net/provider/core/preset/json_file.py +47 -49
- pygpt_net/provider/gpt/agents/experts.py +2 -2
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/METADATA +13 -6
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/RECORD +86 -85
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/kernel/reply.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

 import json
@@ -20,7 +20,7 @@ from pygpt_net.item.ctx import CtxItem
 class Reply:
     def __init__(self, window=None):
         """
-        Reply handler
+        Reply handler (response from plugins, tools, etc.)

         :param window: Window instance
         """
@@ -42,28 +42,33 @@ class Reply:
         :param extra: extra data
         :return: list of results
         """
-        flush = False
-        if "flush" in extra and extra["flush"]:
-            flush = True
+        flush = extra.get("flush", False) if isinstance(extra, dict) else False
         ctx = context.ctx
-        if ctx is
+        if ctx is None:
+            return []
+
+        core = self.window.core
+        self.last_result = ctx.results
+        self.on_post_response(ctx, extra)
+
+        if ctx.agent_call:
+            # TODO: clear() here?
+            return ctx.results  # abort if called by agent, TODO: check if needed!!!!!
+
+        core.debug.info("Reply...")
+        if core.debug.enabled() and self.is_log():
+            core.debug.debug("CTX REPLY: " + str(ctx))
+
+        if ctx.reply:
+            if self.reply_idx >= ctx.pid:  # prevent multiple replies per ctx
+                return []
+            self.reply_idx = ctx.pid
+            self.append(ctx)
+
+            if flush or self.window.controller.kernel.async_allowed(ctx):
+                self.flush()
+
+        return ctx.results

     def append(self, ctx: CtxItem):
         """
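The flush-flag rewrite above is more than a compaction: the one-liner keeps the same default as the removed three lines while also tolerating `extra` being None or a non-dict, where the old `"flush" in extra` membership test would raise a TypeError. A minimal standalone check (the function name is illustrative, not from the package):

    def read_flush_flag(extra):
        # safe for dicts, None, and anything else
        return extra.get("flush", False) if isinstance(extra, dict) else False

    assert read_flush_flag({"flush": True}) is True
    assert read_flush_flag({}) is False
    assert read_flush_flag(None) is False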
@@ -73,76 +78,63 @@ class Reply:
         """
         self.window.core.debug.info("Reply stack (add)...")
         self.reply_stack.append(ctx.results)
-        ctx.results = []  # clear results
         self.reply_ctx = ctx
+        self.reply_ctx.results = []  # clear results

     def flush(self):
         """Flush reply stack"""
         if self.reply_ctx is None or len(self.reply_stack) == 0:
             return

-        self.window.core
+        core = self.window.core
+        dispatch = self.window.dispatch
+        core.debug.info("Reply stack (flush)...")
+
         results = []
         for responses in self.reply_stack:
             for result in responses:
                 results.append(result)

         self.window.update_status("")  # clear status
-
-        if self.window.controller.agent.legacy.enabled():
-            self.window.controller.agent.legacy.add_run()
-            self.window.controller.agent.legacy.update()
+        self.window.controller.agent.on_reply(self.reply_ctx)  # handle reply in agent

         # prepare data to send as reply
         tool_data = json.dumps(results)
         if (len(self.reply_stack) < 2
                 and self.reply_ctx.extra_ctx
-                and
+                and core.config.get("ctx.use_extra")):
             tool_data = self.reply_ctx.extra_ctx  # if extra content is set, use it as data to send

-        prev_ctx =
+        prev_ctx = core.ctx.as_previous(self.reply_ctx)  # copy result to previous ctx and clear current ctx
+        core.ctx.update_item(self.reply_ctx)  # update context in db
         self.window.update_status('...')

-        # if response from sub call, from experts
-        parent_id = None
-        if self.reply_ctx.sub_call:
-            if self.reply_ctx.meta is not None:
-                parent_id = self.reply_ctx.meta.id  # slave meta id
-
         # tool output append
+        dispatch(RenderEvent(RenderEvent.TOOL_UPDATE, {
             "meta": self.reply_ctx.meta,
             "tool_data": tool_data,
-        }
-        event = RenderEvent(RenderEvent.TOOL_UPDATE, data)
-        self.window.dispatch(event)
+        }))
         self.clear()

         # disable reply if LlamaIndex agent is used
-        mode =
-        if mode == MODE_LLAMA_INDEX:
-            self.window.core.debug.info("Reply disabled for LlamaIndex ReAct agent")
-            return
+        mode = core.config.get("mode")
+        if mode == MODE_LLAMA_INDEX and core.config.get("llama.idx.react", False):
+            return

         # send reply
         context = BridgeContext()
         context.ctx = prev_ctx
         context.prompt = str(tool_data)
-            "force": True,
-            "reply": True,
-            "internal": True,
-            "parent_id": parent_id,
-        }
-        event = KernelEvent(KernelEvent.REPLY_RETURN, {
+        dispatch(KernelEvent(KernelEvent.REPLY_RETURN, {
             'context': context,
-            'extra':
+            'extra': {
+                "force": True,
+                "reply": True,
+                "internal": True,
+            },
+        }))

+    def on_post_response(
         self,
         ctx: CtxItem,
         extra_data: Optional[Dict[str, Any]] = None
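The flush() rewrite also collapses the old build-then-dispatch sequence (`data = {...}`, `event = RenderEvent(...)`, `self.window.dispatch(event)`) into a single expression on local aliases. The shape of the call, sketched with stand-in types (hypothetical; the real event classes live in pygpt_net.core.events, and dispatch is the window dispatcher):

    class RenderEvent:
        TOOL_UPDATE = "render.tool_update"

        def __init__(self, name, data=None):
            self.name = name
            self.data = data or {}

    def dispatch(event):
        # stand-in for self.window.dispatch
        print(f"dispatched {event.name}: {event.data}")

    # one-expression dispatch, as in the new flush() body
    dispatch(RenderEvent(RenderEvent.TOOL_UPDATE, {
        "meta": None,
        "tool_data": "[]",
    }))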
@@ -157,8 +149,7 @@ class Reply:
         if (ctx is None or not ctx.agent_call) or not self.window.controller.kernel.is_threaded():
             if "post_update" in extra_data and isinstance(extra_data["post_update"], list):
                 if "file_explorer" in extra_data["post_update"]:
-                    # update file explorer view
-                    self.window.controller.files.update_explorer()
+                    self.window.controller.files.update_explorer()  # update file explorer view

     def clear(self):
         """Clear reply stack"""
@@ -172,8 +163,4 @@ class Reply:

         :return: true if can be logged
         """
-        if self.window.core.config.has("log.events") \
-                and self.window.core.config.get("log.events"):
-            is_log = True
-        return is_log
+        return self.window.core.config.get("log.events", False)
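The is_log() simplification leans on config.get(key, default) returning the default when the key is absent, which subsumes the removed has() check. Assuming the config object behaves like a mapping (an assumption; the real Config class may differ), the two forms agree for boolean values:

    config = {"log.events": True}  # stand-in for self.window.core.config

    # old form: explicit existence check, then read
    is_log = False
    if "log.events" in config and config["log.events"]:
        is_log = True

    # new form: single read with a default
    assert config.get("log.events", False) == is_log

If the stored value were a truthy non-boolean, the new form would return it as-is rather than coercing to True, which is equivalent for callers that treat the result as a flag.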
pygpt_net/controller/kernel/stack.py
CHANGED
@@ -6,12 +6,12 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

 from typing import Any

-from
+from PySide6.QtWidgets import QApplication

 from pygpt_net.core.events import KernelEvent
 from pygpt_net.core.bridge.context import BridgeContext
@@ -44,14 +44,12 @@ class Stack:
         """
         Check if reply stack has any item

-        :return: True if has
+        :return: True if has items
         """
         return self.current is not None

     def clear(self):
-        """
-        Clear reply stack
-        """
+        """Clear reply stack"""
         self.current = None
         self.unlock()

@@ -71,35 +69,37 @@ class Stack:
                 context.parent_id,  # expert id
                 context.input,  # query
             )
+        # cmd execute
         elif context.type == ReplyContext.CMD_EXECUTE:
             self.window.controller.plugins.apply_cmds(
                 context.ctx,  # current ctx
                 context.cmds,  # commands
             )
+        # cmd execute (force)
         elif context.type == ReplyContext.CMD_EXECUTE_FORCE:
             self.window.controller.plugins.apply_cmds(
                 context.ctx,  # current ctx
                 context.cmds,  # commands
                 all=True,
             )
+        # cmd execute (inline)
         elif context.type == ReplyContext.CMD_EXECUTE_INLINE:
             self.window.controller.plugins.apply_cmds_inline(
                 context.ctx,  # current ctx
                 context.cmds,  # commands
             )
+        # agent continue
         elif context.type == ReplyContext.AGENT_CONTINUE:
             bridge_context = BridgeContext()
             bridge_context.ctx = context.ctx
-            bridge_context.prompt = context.input
-                "force": True,
-                "internal": True,
-            }
-            event = KernelEvent(KernelEvent.INPUT_SYSTEM, {
+            bridge_context.prompt = context.input  # from reply context
+            self.window.dispatch(KernelEvent(KernelEvent.INPUT_SYSTEM, {
                 'context': bridge_context,
-                'extra':
+                'extra': {
+                    "force": True,
+                    "internal": True,
+                },
+            }))

     def is_locked(self) -> bool:
         """
@@ -131,7 +131,7 @@ class Stack:

     def waiting(self) -> bool:
         """
-        Check if reply stack is waiting
+        Check if reply stack is waiting for processing

         :return: True if waiting
         """
pygpt_net/controller/model/importer.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 20:00:00 #
 # ================================================== #

 import copy
@@ -475,6 +475,7 @@ class Importer:
             MODE_LLAMA_INDEX,
             MODE_AGENT,
             MODE_AGENT_LLAMA,
+            MODE_AGENT_OPENAI,
             MODE_EXPERT,
         ]
         m.provider = 'ollama'
pygpt_net/controller/model/model.py
CHANGED
@@ -6,18 +6,20 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.15
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

+import os
 from typing import Optional

-from pygpt_net.core.events import Event, AppEvent
+from pygpt_net.core.events import Event, AppEvent, BaseEvent
+from pygpt_net.core.types import MODE_LLAMA_INDEX, MODE_CHAT
 from pygpt_net.item.model import ModelItem
+from pygpt_net.utils import trans

 from .editor import Editor
 from .importer import Importer

-
 class Model:
     def __init__(self, window=None):
         """
@@ -32,6 +34,63 @@ class Model:
     def _ensure_current_model_map(self):
         return self.window.core.config.data.setdefault('current_model', {})

+    def handle(self, event: BaseEvent):
+        """
+        Handle events
+
+        :param event: BaseEvent: Event to handle
+        """
+        name = event.name
+        mode = self.window.core.config.get("mode")
+
+        # on input begin
+        if name == Event.INPUT_BEGIN:
+            force = event.data.get("force", False)
+            stop = event.data.get("stop", False)
+            if not force and not stop:
+                # check ollama model
+                model = self.window.core.config.get('model')
+                if model:
+                    model_data = self.window.core.models.get(model)
+                    if model_data is not None and model_data.is_ollama():
+                        if (mode == MODE_LLAMA_INDEX or
+                                (
+                                    mode == MODE_CHAT and not model_data.is_openai_supported() and model_data.is_ollama()
+                                )
+                        ):
+                            model_id = model_data.get_ollama_model()
+
+                            # load ENV vars first
+                            if ('env' in model_data.llama_index
+                                    and model_data.llama_index['env'] is not None):
+                                for item in model_data.llama_index['env']:
+                                    key = item.get('name', '').strip()
+                                    value = item.get('value', '').strip()
+                                    os.environ[key] = value
+                            status = self.window.core.models.ollama.check_model(model_id)
+                            is_installed = status.get('is_installed', False)
+                            is_model = status.get('is_model', False)
+                            if not is_installed:
+                                event.data["stop"] = True  # stop flow
+                                self.window.ui.dialogs.alert(trans("dialog.ollama.not_installed"))
+                                return
+                            if not is_model:
+                                event.data["stop"] = True  # stop flow
+                                self.window.ui.dialogs.alert(
+                                    trans("dialog.ollama.model_not_found").replace("{model}", model_id))
+                                return
+
+        # on input before
+        elif name == Event.INPUT_BEFORE:
+            # check API key, show monit if no API key for current provider
+            model = self.window.core.config.get('model')
+            if model:
+                model_data = self.window.core.models.get(model)
+                if not self.window.controller.chat.common.check_api_key(mode=mode, model=model_data, monit=False):
+                    self.window.controller.chat.common.check_api_key(mode=mode, model=model_data, monit=True)
+                    event.data["stop"] = True  # stop flow
+                    return
+
     def select(self, model: str):
         """
         Select model
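The new Model.handle() follows a veto pattern: gate handlers run on Event.INPUT_BEGIN / Event.INPUT_BEFORE and abort the flow by setting event.data["stop"] = True on the shared event, which downstream handlers then check. A minimal sketch of that contract (all names here are stand-ins, not the package's dispatcher):

    class BaseEvent:
        def __init__(self, name, data=None):
            self.name = name
            self.data = data or {}

    def check_model(event):
        # gate: veto the flow when a precondition fails
        if not event.data.get("model_installed", True):
            event.data["stop"] = True

    def send_input(event):
        if event.data.get("stop"):
            return  # aborted by an earlier handler
        print("sending:", event.data["prompt"])

    event = BaseEvent("input.begin", {"prompt": "hi", "model_installed": False})
    for handler in (check_model, send_input):
        handler(event)  # prints nothing: check_model stopped the flow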
pygpt_net/controller/settings/editor.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 02:00:00 #
 # ================================================== #

 import copy
@@ -67,7 +67,7 @@ class Editor:
         self.window.ui.add_hook("update.config.notepad.num", self.hook_update)
         self.window.ui.add_hook("update.config.render.code_syntax", self.hook_update)
         self.window.ui.add_hook("update.config.theme.style", self.hook_update)
-        self.window.ui.add_hook("update.config.
+        self.window.ui.add_hook("update.config.agent.output.render.all", self.hook_update)
         self.window.ui.add_hook("update.config.audio.input.backend", self.hook_update)
         self.window.ui.add_hook("update.config.audio.output.backend", self.hook_update)
         # self.window.ui.add_hook("llama.idx.storage", self.hook_update) # vector store update
@@ -204,7 +204,7 @@ class Editor:
         if self.config_changed('ctx.sources') or self.config_changed('ctx.audio'):
             self.window.controller.ctx.refresh()

-        if self.config_changed('
+        if self.config_changed('agent.output.render.all'):
             self.window.controller.chat.render.reload()

         # update global shortcuts
@@ -294,7 +294,7 @@ class Editor:
             self.window.core.config.set(key, value)
             self.window.controller.ctx.update()

-        elif key == "
+        elif key == "agent.output.render.all":
             self.window.core.config.set(key, value)
             self.window.controller.chat.render.reload()

pygpt_net/controller/ui/ui.py
CHANGED
@@ -6,20 +6,20 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.15
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

 from typing import Optional

 from PySide6.QtGui import QColor

+from pygpt_net.core.events import BaseEvent, Event
 from pygpt_net.utils import trans

 from .mode import Mode
 from .tabs import Tabs
 from .vision import Vision

-
 class UI:
     def __init__(self, window=None):
         """
@@ -65,6 +65,20 @@ class UI:
         self.vision.update()
         self.window.controller.agent.legacy.update()

+    def handle(self, event: BaseEvent):
+        """
+        Handle events
+
+        :param event: BaseEvent: Event to handle
+        """
+        name = event.name
+
+        # on input begin
+        if name == Event.INPUT_BEGIN:
+            self.tabs.switch_to_first_chat()  # switch to first active chat tab
+        elif name == Event.CTX_END:
+            self.update_tokens()  # update UI
+
     def get_colors(self) -> dict:
         """
         Get color labels
pygpt_net/core/agents/observer/evaluation.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 02:00:00 #
 # ================================================== #

 from typing import List
@@ -188,8 +188,8 @@ class Evaluation:
         i = 0
         for ctx in history:
             # if next input (but not last) then clear outputs - use only output after last user input
-            if self.is_input(ctx) and i < len(history) - 1:
-                outputs.clear()
+            # if self.is_input(ctx) and i < len(history) - 1:
+            #     outputs.clear()

             if self.is_output(ctx):
                 if ctx.output:
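Commenting out the outputs.clear() call changes what feeds the evaluation: previously only outputs produced after the last user input survived; now every output in the history is kept. A toy reproduction of the loop's effect:

    history = [
        ("input", "q1"), ("output", "a1"),
        ("input", "q2"), ("output", "a2"),
    ]

    outputs = []
    for i, (kind, text) in enumerate(history):
        # old behaviour: restart collection at every non-final user input
        # if kind == "input" and i < len(history) - 1:
        #     outputs.clear()
        if kind == "output":
            outputs.append(text)

    assert outputs == ["a1", "a2"]  # the old code would have kept only ["a2"]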
pygpt_net/core/agents/provider.py
CHANGED
@@ -6,11 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
+import os
+from typing import List, Dict, Any

-from
-
+from pygpt_net.core.types import MODE_CHAT
+from pygpt_net.item.model import ModelItem
 from pygpt_net.provider.agents.base import BaseAgent


@@ -94,3 +96,23 @@ class Provider:
         # sort by name
         choices.sort(key=lambda x: list(x.values())[0].lower())
         return choices
+
+    def get_openai_model(self, model: ModelItem) -> Any:
+        """
+        Get OpenAI model by model id
+
+        :param model: ModelItem
+        :return: OpenAI model provider
+        """
+        from openai import AsyncOpenAI
+        from agents import (
+            OpenAIChatCompletionsModel,
+        )
+        if model.provider == "openai":
+            return model.id
+        else:
+            args = self.window.core.models.prepare_client_args(MODE_CHAT, model)
+            return OpenAIChatCompletionsModel(
+                model=model.id,
+                openai_client=AsyncOpenAI(**args),
+            )
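get_openai_model() routes models through the OpenAI Agents SDK: native OpenAI models are returned by id (the SDK resolves those itself), while other providers are wrapped in OpenAIChatCompletionsModel around an AsyncOpenAI client built from the provider's OpenAI-compatible endpoint. A standalone sketch of the same idea (the base_url/api_key values are placeholders; prepare_client_args is the package's own helper and is only approximated here):

    from openai import AsyncOpenAI
    from agents import OpenAIChatCompletionsModel

    def resolve_model(provider: str, model_id: str):
        if provider == "openai":
            return model_id  # plain id; the Agents SDK resolves it natively
        # any OpenAI-compatible endpoint can be used via the adapter
        client = AsyncOpenAI(
            base_url="http://localhost:11434/v1",  # placeholder, e.g. a local Ollama server
            api_key="placeholder",
        )
        return OpenAIChatCompletionsModel(model=model_id, openai_client=client)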
pygpt_net/core/agents/runner.py
CHANGED
@@ -105,6 +105,7 @@ class Runner:
         # vector store idx from preset
         if preset:
             vector_store_idx = preset.idx
+            extra["agent_idx"] = vector_store_idx

         # tools
         agent_tools = self.window.core.agents.tools
@@ -126,7 +127,9 @@ class Runner:

         # --- ADDITIONAL CONTEXT ---
         # append additional context from RAG if available
-        if vector_store_idx
+        if (vector_store_idx
+                and vector_store_idx != "_"
+                and self.window.core.config.get("agent.idx.auto_retrieve", True)):
             ad_context = self.window.core.idx.chat.query_retrieval(
                 query=prompt,
                 idx=vector_store_idx,
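The widened retrieval gate in runner.py now skips the "_" placeholder index and honours a new agent.idx.auto_retrieve option that defaults to True. The condition in isolation (config is a stand-in mapping):

    def should_retrieve(vector_store_idx, config) -> bool:
        # "_" acts as the "no index selected" placeholder
        return bool(
            vector_store_idx
            and vector_store_idx != "_"
            and config.get("agent.idx.auto_retrieve", True)
        )

    assert should_retrieve("my_index", {}) is True
    assert should_retrieve("_", {}) is False
    assert should_retrieve("my_index", {"agent.idx.auto_retrieve": False}) is False
    assert should_retrieve(None, {}) is False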
pygpt_net/core/agents/runners/llama_workflow.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 02:00:00 #
 # ================================================== #

 import re
@@ -87,13 +87,14 @@ class LlamaWorkflow(BaseRunner):
             self.set_idle(signals)
             return False

+        use_current = False
         if ctx.partial:
+            use_current = True  # use current item as response if partial item (do not create new one)
             ctx.partial = False  # reset partial flag

-        response_ctx = self.make_response(ctx)
+        response_ctx = self.make_response(ctx, use_current=use_current)
         self.end_stream(response_ctx, signals)
         self.send_response(response_ctx, signals, KernelEvent.APPEND_DATA)  # send response
-
         self.set_idle(signals)
         return True

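The use_current flag threads into make_response() in the next hunk: when the last emitted item was streamed as a partial, the runner finalizes that item in place instead of appending a second context entry. A toy model of the reuse-vs-create choice (all names are stand-ins):

    class Ctx:
        def __init__(self, partial=False):
            self.partial = partial

    items = [Ctx(partial=True)]  # a partial item already shown while streaming

    def add_ctx(base):
        item = Ctx()
        items.append(item)
        return item

    ctx = items[-1]
    use_current = ctx.partial  # finalize in place if the item is partial
    ctx.partial = False
    response = ctx if use_current else add_ctx(ctx)
    assert response is ctx and len(items) == 1  # no duplicate entry created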
@@ -152,19 +153,24 @@ class LlamaWorkflow(BaseRunner):
             ctx.output = ctx.agent_final_response  # set output to current context
         else:
             ctx.output = ctx.live_output
-
         return ctx

     def make_response(
         self,
-        ctx: CtxItem
+        ctx: CtxItem,
+        use_current: bool = False
     ) -> CtxItem:
         """
         Create a response context item with the given input and output.

         :param ctx: CtxItem - the context item to use as a base
+        :param use_current: If True, use the current context item instead of creating a new one
         """
+        if use_current:
+            response_ctx = ctx  # use current context item
+        else:
+            response_ctx = self.add_ctx(ctx, with_tool_outputs=True)
+
         response_ctx.set_input("")

         prev_output = ctx.live_output
@@ -176,6 +182,9 @@ class LlamaWorkflow(BaseRunner):
         response_ctx.extra["agent_output"] = True  # mark as output response
         response_ctx.extra["agent_finish"] = True  # mark as finished

+        if "agent_input" in response_ctx.extra:
+            del response_ctx.extra["agent_input"]  # remove agent input from extra
+
         if ctx.agent_final_response:  # only if not empty
             response_ctx.extra["output"] = ctx.agent_final_response

@@ -194,9 +203,12 @@ class LlamaWorkflow(BaseRunner):
         :param output: Output string
         :return: Filtered output string
         """
+        if output is None:
+            return ""
+
         # Remove <execute>...</execute> tags
         filtered_output = re.sub(r'<execute>.*?</execute>', '', output, flags=re.DOTALL)
-        return filtered_output
+        return filtered_output.strip()

     async def run_agent(
         self,
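The hardened filter (None guard plus a trailing strip()) is easy to verify in isolation:

    import re

    def remove_execute_blocks(output):
        if output is None:
            return ""  # upstream may hand over an empty ctx output
        filtered = re.sub(r'<execute>.*?</execute>', '', output, flags=re.DOTALL)
        return filtered.strip()

    assert remove_execute_blocks(None) == ""
    assert remove_execute_blocks("<execute>ls -la</execute> done ") == "done"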
pygpt_net/core/agents/runners/loop.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 02:00:00 #
 # ================================================== #

 from typing import Optional, List
@@ -136,6 +136,7 @@ class Loop(BaseRunner):
         self.set_status(signals, msg)
         if score < 0:
             self.send_response(ctx, signals, KernelEvent.APPEND_END)
+            self.set_idle(signals)
             return True
         good_score = self.window.core.config.get("agent.llama.loop.score", 75)
         if score >= good_score != 0:
@@ -145,6 +146,7 @@ class Loop(BaseRunner):
                 score=str(score)
             )
             self.send_response(ctx, signals, KernelEvent.APPEND_END, msg=msg)
+            self.set_idle(signals)
             return True

         # print("Instruction: " + instruction, "Score: " + str(score))