pygpt-net 2.6.20__py3-none-any.whl → 2.6.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +9 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/agent/agent.py +130 -2
- pygpt_net/controller/agent/experts.py +93 -96
- pygpt_net/controller/agent/llama.py +2 -1
- pygpt_net/controller/assistant/assistant.py +18 -1
- pygpt_net/controller/attachment/attachment.py +17 -1
- pygpt_net/controller/camera/camera.py +15 -7
- pygpt_net/controller/chat/chat.py +2 -2
- pygpt_net/controller/chat/common.py +50 -33
- pygpt_net/controller/chat/image.py +67 -77
- pygpt_net/controller/chat/input.py +94 -166
- pygpt_net/controller/chat/output.py +83 -140
- pygpt_net/controller/chat/response.py +83 -102
- pygpt_net/controller/chat/text.py +116 -149
- pygpt_net/controller/ctx/common.py +2 -1
- pygpt_net/controller/ctx/ctx.py +86 -6
- pygpt_net/controller/files/files.py +13 -1
- pygpt_net/controller/idx/idx.py +26 -2
- pygpt_net/controller/kernel/reply.py +53 -66
- pygpt_net/controller/kernel/stack.py +16 -16
- pygpt_net/controller/model/importer.py +2 -1
- pygpt_net/controller/model/model.py +62 -3
- pygpt_net/controller/settings/editor.py +4 -4
- pygpt_net/controller/ui/ui.py +16 -2
- pygpt_net/core/agents/observer/evaluation.py +3 -3
- pygpt_net/core/agents/provider.py +25 -3
- pygpt_net/core/agents/runner.py +4 -1
- pygpt_net/core/agents/runners/llama_workflow.py +19 -7
- pygpt_net/core/agents/runners/loop.py +3 -1
- pygpt_net/core/agents/runners/openai_workflow.py +17 -3
- pygpt_net/core/agents/tools.py +4 -1
- pygpt_net/core/bridge/context.py +34 -37
- pygpt_net/core/ctx/ctx.py +1 -1
- pygpt_net/core/db/database.py +2 -2
- pygpt_net/core/debug/debug.py +12 -1
- pygpt_net/core/dispatcher/dispatcher.py +24 -1
- pygpt_net/core/events/app.py +7 -7
- pygpt_net/core/events/control.py +26 -26
- pygpt_net/core/events/event.py +6 -3
- pygpt_net/core/events/kernel.py +2 -2
- pygpt_net/core/events/render.py +13 -13
- pygpt_net/core/experts/experts.py +76 -82
- pygpt_net/core/experts/worker.py +12 -12
- pygpt_net/core/models/models.py +5 -1
- pygpt_net/core/models/ollama.py +14 -5
- pygpt_net/core/render/web/helpers.py +2 -2
- pygpt_net/core/render/web/renderer.py +4 -4
- pygpt_net/core/types/__init__.py +2 -1
- pygpt_net/core/types/agent.py +4 -4
- pygpt_net/core/types/base.py +19 -0
- pygpt_net/core/types/console.py +6 -6
- pygpt_net/core/types/mode.py +8 -8
- pygpt_net/core/types/multimodal.py +3 -3
- pygpt_net/core/types/openai.py +2 -1
- pygpt_net/data/config/config.json +4 -4
- pygpt_net/data/config/models.json +19 -3
- pygpt_net/data/config/settings.json +14 -14
- pygpt_net/data/locale/locale.en.ini +2 -2
- pygpt_net/item/ctx.py +256 -240
- pygpt_net/item/model.py +59 -116
- pygpt_net/item/preset.py +122 -105
- pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
- pygpt_net/provider/agents/openai/agent.py +4 -12
- pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
- pygpt_net/provider/agents/openai/agent_planner.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
- pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
- pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
- pygpt_net/provider/agents/openai/evolve.py +5 -9
- pygpt_net/provider/agents/openai/supervisor.py +4 -8
- pygpt_net/provider/core/config/patch.py +10 -3
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
- pygpt_net/provider/core/model/patch.py +11 -1
- pygpt_net/provider/core/preset/json_file.py +47 -49
- pygpt_net/provider/gpt/agents/experts.py +2 -2
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/METADATA +13 -6
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/RECORD +86 -85
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/chat/text.py
CHANGED

```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #
 
 from typing import Optional
```
```diff
@@ -34,7 +34,7 @@ class Text:
         :param window: Window instance
         """
         self.window = window
-        self.ctx_pid = 0
+        self.ctx_pid = 0 # sequence number for context items
 
     def send(
             self,
@@ -42,7 +42,6 @@ class Text:
             reply: bool = False,
             internal: bool = False,
             prev_ctx: Optional[CtxItem] = None,
-            parent_id: Optional[str] = None,
             multimodal_ctx: Optional[MultimodalContext] = None,
     ) -> CtxItem:
         """
```
```diff
@@ -52,59 +51,49 @@
         :param reply: reply from plugins
         :param internal: internal call
         :param prev_ctx: previous context item (if reply)
-        :param parent_id: parent context id
         :param multimodal_ctx: multimodal context
-        :return:
+        :return: CtxItem instance
         """
-        self.window.update_status(trans(
+        self.window.update_status(trans("status.sending"))
+
+        core = self.window.core
+        controller = self.window.controller
+        dispatch = self.window.dispatch
+        config = core.config
+        log = controller.chat.log
 
         # event: prepare username
         event = Event(Event.USER_NAME, {
-
+            "value": config.get("user_name"),
         })
-
-        user_name = event.data[
+        dispatch(event)
+        user_name = event.data["value"]
 
-        # event: prepare
+        # event: prepare AI name
         event = Event(Event.AI_NAME, {
-
+            "value": config.get("ai_name"),
         })
-
-        ai_name = event.data[
+        dispatch(event)
+        ai_name = event.data["value"]
 
         # prepare mode, model, etc.
-
-
-
-
-        stream_mode = self.window.core.config.get('stream')
-        agent_idx = self.window.core.config.get('agent.llama.idx')
-        sys_prompt = self.window.core.config.get('prompt')
+        mode = config.get("mode")
+        model = config.get("model")
+        model_data = core.models.get(model)
+        sys_prompt = config.get("prompt")
         sys_prompt_raw = sys_prompt # store raw prompt (without addons)
-        max_tokens =
-
+        max_tokens = config.get("max_output_tokens") # max output tokens
+        idx_mode = config.get("llama.idx.mode")
+        base_mode = mode # store base parent mode
+        stream = self.is_stream(mode) # check if stream is enabled for given mode
+
         functions = [] # functions to call
         tools_outputs = [] # tools outputs (assistant only)
-        idx_mode = self.window.core.config.get('llama.idx.mode')
-
-        # agent provider
-        if mode == MODE_AGENT_LLAMA:
-            agent_provider = self.window.core.config.get('agent.llama.provider')
-        elif mode == MODE_AGENT_OPENAI:
-            agent_provider = self.window.core.config.get('agent.openai.provider')
-
-        # o1 models: disable stream mode
-        if mode in (MODE_AGENT_LLAMA, MODE_AUDIO):
-            stream_mode = False
-        if mode == MODE_LLAMA_INDEX and idx_mode == "retrieval":
-            stream_mode = False
-        if mode == MODE_LLAMA_INDEX:
-            if not self.window.core.idx.chat.is_stream_allowed():
-                stream_mode = False
 
         # create ctx item
-        meta =
-        meta.preset =
+        meta = core.ctx.get_current_meta()
+        meta.preset = config.get("preset") # current preset
+
         ctx = CtxItem()
         ctx.meta = meta # CtxMeta (owner object)
         ctx.internal = internal
```
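The hunk above shows the `send()` refactor in miniature: repeated `self.window.core.config.get(...)` chains become local aliases (`core`, `controller`, `dispatch`, `config`, `log`), and name resolution becomes a seed-dispatch-read round-trip over `event.data`. A minimal sketch of that round-trip, assuming only what the hunk itself shows (`Event` carries its payload in `event.data` and `window.dispatch` runs the handlers):

```python
from pygpt_net.core.events import Event

def resolve_user_name(window) -> str:
    """Sketch of the dispatch-then-read pattern used by Text.send()."""
    config = window.core.config
    dispatch = window.dispatch

    # seed the event with the configured value
    event = Event(Event.USER_NAME, {
        "value": config.get("user_name"),
    })
    dispatch(event)  # handlers may overwrite event.data["value"] in place
    return event.data["value"]  # read back the (possibly overridden) value
```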
```diff
@@ -116,63 +105,38 @@ class Text:
         ctx.prev_ctx = prev_ctx # store previous context item if exists
         ctx.live = True
         ctx.pid = self.ctx_pid # store PID
+
         self.ctx_pid += 1 # increment PID
 
         # if prev ctx is not empty, then copy input name to current ctx
         if prev_ctx is not None and prev_ctx.sub_call is True: # sub_call = sent from expert
             ctx.input_name = prev_ctx.input_name
-
-        # if reply from expert command
-        if parent_id is not None: # parent_id = reply from expert
-            # At this point, ctx.meta ID = slave META ID (parent_id is given from slave, not from master)
-            ctx.sub_reply = True # mark as sub reply
-            ctx.input_name = prev_ctx.input_name
-            ctx.output_name = prev_ctx.output_name
-            ctx.extra["sub_reply"] = True # mark as sub reply in extra data
-        else:
-            self.window.core.ctx.set_last_item(ctx) # store last item
-
         if reply:
             ctx.extra["sub_reply"] = True # mark as sub reply in extra data
 
-
+        controller.files.reset() # clear uploaded files IDs
+        controller.ctx.store_history(ctx, "input") # store to history
+        controller.chat.log_ctx(ctx, "input") # log
 
         # assistant: create thread, upload attachments
         if mode == MODE_ASSISTANT:
-
-
-        # store in history (input only)
-        if self.window.core.config.get('store_history'):
-            self.window.core.history.append(ctx, "input")
-
-        self.window.controller.chat.log_ctx(ctx, "input") # log
-
-        # agent mode: before context
-        if mode == MODE_AGENT:
-            self.window.controller.agent.legacy.on_ctx_before(ctx)
-
-        # event: context before
-        event = Event(Event.CTX_BEFORE)
-        event.ctx = ctx
-        self.window.dispatch(event)
+            controller.assistant.begin(ctx)
 
-        #
-
-            mode,
-
-            parent_id
-        )
+        # event: ctx before
+        dispatch(Event(Event.CTX_BEFORE, {
+            "mode": mode,
+        }, ctx=ctx))
 
-        # on pre prompt
+        # event: on pre prompt
         event = Event(Event.PRE_PROMPT, {
-
-
+            "mode": mode,
+            "value": str(sys_prompt),
         })
-
-        sys_prompt = event.data[
+        dispatch(event)
+        sys_prompt = event.data["value"]
 
         # build final prompt (+plugins)
-        sys_prompt =
+        sys_prompt = core.prompt.prepare_sys_prompt(
             mode=mode,
             model=model_data,
             sys_prompt=sys_prompt,
```
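The `CTX_BEFORE` emission in this hunk also shrinks from three statements to one: `Event` is now constructed with its data dict and the context item up front (the `ctx=ctx` keyword lines up with the constructor change listed for `pygpt_net/core/events/event.py` above). Side by side, with `window`, `mode`, and `ctx` assumed in scope as they are in `send()`:

```python
from pygpt_net.core.events import Event

def emit_ctx_before_old(window, ctx) -> None:
    # 2.6.20: construct, attach ctx, then dispatch
    event = Event(Event.CTX_BEFORE)
    event.ctx = ctx
    window.dispatch(event)

def emit_ctx_before_new(window, mode, ctx) -> None:
    # 2.6.21: payload and ctx are passed at construction
    window.dispatch(Event(Event.CTX_BEFORE, {
        "mode": mode,
    }, ctx=ctx))
```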
```diff
@@ -181,101 +145,104 @@ class Text:
             internal=internal,
         )
 
-
+        log("Appending input to chat window...")
 
         # render: begin
-
+        dispatch(RenderEvent(RenderEvent.BEGIN, {
             "meta": ctx.meta,
             "ctx": ctx,
-            "stream":
-        }
-
-
-
-        # append text from input to chat window
-        data = {
+            "stream": stream,
+        }))
+        # render: append input text
+        dispatch(RenderEvent(RenderEvent.INPUT_APPEND, {
             "meta": ctx.meta,
             "ctx": ctx,
-        }
-        event = RenderEvent(RenderEvent.INPUT_APPEND, data)
-        self.window.dispatch(event)
+        }))
 
         # add ctx to DB here and only update it after response,
         # MUST BE REMOVED AFTER AS FIRST MSG (LAST ON LIST)
-
+        core.ctx.add(ctx)
+        core.ctx.set_last_item(ctx) # mark as last item
+        controller.ctx.update(reload=True, all=False)
 
-        #
-
-            reload=True,
-            all=False,
-        )
+        # prepare user and plugin tools (native mode only)
+        functions.extend(core.command.get_functions())
 
-        #
-        functions += self.window.core.command.get_functions(parent_id)
-
-        # assistant only
+        # assistant only - prepare tool outputs for assistant
         if mode == MODE_ASSISTANT:
-
-            tools_outputs = self.window.controller.assistant.threads.handle_tool_outputs(ctx)
+            tools_outputs = controller.assistant.threads.handle_tool_outputs(ctx)
             if len(tools_outputs) > 0:
-
+                log("Tool outputs sending...")
+
+        # --------------------- BRIDGE CALL ---------------------
 
-        # make API call
         try:
-            # get attachments
-            files = self.window.core.attachments.get_all(mode)
+            files = core.attachments.get_all(mode) # get attachments
            num_files = len(files)
            if num_files > 0:
-
-
-
-
-
-
-
-
-
-
-
-
-                history=self.window.core.ctx.all(meta_id=parent_id), # get all ctx items
+                log(f"Attachments ({mode}): {num_files}")
+
+            context = BridgeContext(
+                assistant_id=config.get("assistant"),
+                attachments=files,
+                ctx=ctx, # CtxItem instance
+                external_functions=functions, # external functions
+                file_ids=controller.files.get_ids(), # uploaded files IDs
+                history=core.ctx.all(), # get all ctx items
+                idx=controller.idx.get_current(), # current idx
+                idx_mode=idx_mode, # llama index mode (chat or query)
+                max_tokens=max_tokens, # max output tokens
                 mode=mode,
+                model=model_data, # ModelItem instance
+                multimodal_ctx=multimodal_ctx, # multimodal context
                 parent_mode=base_mode,
-
+                preset=controller.presets.get_current(), # current preset
+                prompt=text, # input text
+                stream=stream, # is stream enabled
                 system_prompt=sys_prompt,
                 system_prompt_raw=sys_prompt_raw, # for llama-index (query mode only)
-                prompt=text, # input text
-                stream=stream_mode, # is stream mode
-                attachments=files,
-                file_ids=self.window.controller.files.uploaded_ids, # uploaded files IDs
-                assistant_id=assistant_id,
-                idx=self.window.controller.idx.current_idx, # current idx
-                idx_mode=idx_mode, # llama index mode (chat or query)
-                external_functions=functions, # external functions
                 tools_outputs=tools_outputs, # if not empty then will submit outputs to assistant
-                max_tokens=max_tokens, # max output tokens
-                multimodal_ctx=multimodal_ctx, # multimodal context
-                preset=self.window.controller.presets.get_current(), # current preset
             )
             extra = {
-
-
-
+                "mode": mode,
+                "reply": reply,
+                "internal": internal,
             }
-            if mode in (MODE_AGENT_LLAMA, MODE_AGENT_OPENAI):
-                extra['agent_idx'] = agent_idx
-                extra['agent_provider'] = agent_provider
 
-
-
-
-
-
-
+            # event: bridge before
+            dispatch(Event(Event.BRIDGE_BEFORE, {
+                "mode": mode,
+                "context": context,
+                "extra": extra,
+            }))
+
+            controller.chat.common.lock_input() # lock input
+            dispatch(AppEvent(AppEvent.INPUT_CALL)) # app event
+            dispatch(KernelEvent(KernelEvent.REQUEST, {
+                "context": context,
+                "extra": extra,
+            }))
 
         except Exception as e:
-
-
-
+            controller.chat.handle_error(e)
+            log(f"Bridge call ERROR: {e}")
+
+        return ctx
 
-
+    def is_stream(self, mode: str) -> bool:
+        """
+        Check if stream is enabled for given mode
+
+        :param mode: mode
+        :return: True if stream is enabled, False otherwise
+        """
+        core = self.window.core
+        stream = core.config.get("stream")
+        if mode in (MODE_AGENT_LLAMA, MODE_AUDIO):
+            return False # TODO: check if this is correct in agent
+        elif mode == MODE_LLAMA_INDEX:
+            if core.config.get("llama.idx.mode") == "retrieval":
+                return False
+            if not core.idx.chat.is_stream_allowed():
+                return False
+        return stream
```
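The new `is_stream()` helper folds the stream gating that 2.6.20 inlined in `send()` (the removed `stream_mode` block earlier in this file) into a single predicate. A standalone restatement of that policy, where `cfg` and `idx_allows_stream` stand in for `core.config` and `core.idx.chat.is_stream_allowed()`, and the mode constants are placeholder values (the real ones live in `pygpt_net.core.types`):

```python
# Placeholder values; the real constants live in pygpt_net.core.types.
MODE_AGENT_LLAMA = "agent_llama"
MODE_AUDIO = "audio"
MODE_LLAMA_INDEX = "llama_index"

def is_stream(mode: str, cfg: dict, idx_allows_stream: bool) -> bool:
    """Mirror of the Text.is_stream() policy added in this release."""
    if mode in (MODE_AGENT_LLAMA, MODE_AUDIO):
        return False  # streaming is forced off for these modes
    if mode == MODE_LLAMA_INDEX:
        if cfg.get("llama.idx.mode") == "retrieval":
            return False  # retrieval mode never streams
        if not idx_allows_stream:
            return False  # the index itself disallows streaming
    return bool(cfg.get("stream"))  # otherwise follow the config flag
```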
pygpt_net/controller/ctx/common.py
CHANGED

```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #
 
 from typing import Optional
@@ -17,6 +17,7 @@ from PySide6.QtWidgets import QApplication
 from pygpt_net.core.tabs.tab import Tab
 from pygpt_net.utils import trans
 from pygpt_net.item.ctx import CtxMeta
+
 from .summarizer import Summarizer
 
 
```
pygpt_net/controller/ctx/ctx.py
CHANGED
```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #
 
 from typing import Optional, List
@@ -14,17 +14,20 @@ from typing import Optional, List
 from PySide6.QtCore import QModelIndex, QTimer
 from PySide6.QtGui import QStandardItem
 
-from pygpt_net.core.events import Event, AppEvent, RenderEvent
 from pygpt_net.item.ctx import CtxItem, CtxMeta
+from pygpt_net.core.types import MODE_ASSISTANT
+from pygpt_net.core.events import (
+    Event,
+    AppEvent,
+    RenderEvent,
+    BaseEvent
+)
+from pygpt_net.utils import trans
 
 from .common import Common
 from .summarizer import Summarizer
 from .extra import Extra
 
-from pygpt_net.utils import trans
-from pygpt_net.core.types import MODE_ASSISTANT
-
-
 class Ctx:
     def __init__(self, window=None):
         """
```
```diff
@@ -45,6 +48,55 @@ class Ctx:
         self.group_id = None
         self.selected = []
 
+    def handle(self, event: BaseEvent):
+        """
+        Handle events
+
+        :param event: BaseEvent: Event to handle
+        """
+        name = event.name
+
+        # on input begin
+        if name == Event.INPUT_BEGIN:
+            force = event.data.get("force", False)
+            stop = event.data.get("stop", False)
+            if not force and not stop:
+                if self.extra.is_editing():
+                    event.data["stop"] = True # stop flow
+                    self.extra.edit_submit()
+                    return
+
+        # on input before
+        elif name == Event.INPUT_BEFORE:
+            mode = event.data.get("mode")
+            text = event.data.get("value", "")
+            multimodal_ctx = event.data.get("multimodal_ctx", None)
+            # check if image captured from camera and attachment exists
+            camera_captured = (self.window.controller.ui.vision.has_vision()
+                               and self.window.controller.attachment.has(mode))
+
+            # allow empty text input only if multimodal data, otherwise abort
+            is_audio = multimodal_ctx is not None and multimodal_ctx.is_audio_input
+            if len(text.strip()) == 0 and (not camera_captured and not is_audio):
+                event.data["stop"] = True # stop flow
+                event.data["silent"] = True # silent stop (no errors)
+                return
+
+        # on input accept
+        elif name == Event.INPUT_ACCEPT:
+            mode = event.data.get("mode")
+            # prepare ctx, create new ctx meta if there is no ctx, or no ctx selected
+            if self.window.core.ctx.count_meta() == 0 or self.window.core.ctx.get_current() is None:
+                self.window.core.ctx.new()
+                self.update()
+                self.window.controller.chat.log("New context created...") # log
+            else:
+                # check if current ctx is allowed for this mode - if not, then auto-create new ctx
+                self.handle_allowed(mode)
+
+            # update mode in ctx
+            self.update_mode_in_current()
+
     def setup(self):
         """Setup ctx"""
         self.common.restore_display_filter() # load filters first
```
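`Ctx` (and `Idx`, further below) now expose a `handle(event: BaseEvent)` entry point, and handlers signal flow control by writing `stop`/`silent` flags back into `event.data`. The actual wiring is not shown in this diff; it presumably lives in `pygpt_net/core/dispatcher/dispatcher.py` (+24 -1 in the file list above). A hedged sketch of what such a fan-out could look like:

```python
from pygpt_net.core.events import BaseEvent

def fan_out(event: BaseEvent, controllers: list) -> None:
    """Hypothetical dispatcher loop; the real implementation may differ."""
    for ctrl in controllers:
        handler = getattr(ctrl, "handle", None)
        if handler is None:
            continue  # controller does not subscribe to events
        handler(event)
        if event.data and event.data.get("stop"):
            break  # a handler aborted the flow via event.data["stop"]
```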
```diff
@@ -1152,6 +1204,34 @@ class Ctx:
         self.group_id = None
         self.update_and_restore()
 
+    def prepare_summary(self, ctx: CtxItem) -> bool:
+        """
+        Prepare context summary
+
+        :param ctx: CtxItem
+        :return: True if handled
+        """
+        if not ctx.meta or not ctx.meta.initialized: # don't call if reply or internal mode
+            if self.window.core.config.get('ctx.auto_summary'):
+                self.window.controller.chat.log("Calling for prepare context name...")
+                self.prepare_name(ctx) # async
+                return True
+        return False
+
+    def store_history(self, ctx: CtxItem, type: str) -> bool:
+        """
+        Store ctx in history if enabled
+
+        :param ctx: CtxItem
+        :param type: input|output
+        :return: True if stored
+        """
+        # store to history
+        if self.window.core.config.get('store_history'):
+            self.window.core.history.append(ctx, type)
+            return True
+        return False
+
     def reload(self):
         """Reload ctx"""
         self.window.core.ctx.reset()
```
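`store_history()` replaces the inline history block this release removes from `Text.send()` (shown earlier), which now makes the single call `controller.ctx.store_history(ctx, "input")`. Its contract restated standalone, with `history_append` standing in for `core.history.append`:

```python
def store_history(cfg: dict, history_append, ctx, type_: str) -> bool:
    """Append ctx to history only when the config flag allows it."""
    if cfg.get("store_history"):
        history_append(ctx, type_)  # type_ is "input" or "output"
        return True
    return False
```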
pygpt_net/controller/files/files.py
CHANGED

```diff
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #
 
 import datetime
@@ -497,6 +497,18 @@ class Files:
         path = path[1:]
         return path
 
+    def reset(self):
+        """Reset uploaded IDs"""
+        self.uploaded_ids = []
+
+    def get_ids(self) -> list:
+        """
+        Get uploaded files IDs
+
+        :return: uploaded files IDs
+        """
+        return self.uploaded_ids
+
     def reload(self):
         """Reload files"""
         self.update_explorer(reload=True)
```
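`reset()` and `get_ids()` give callers an accessor instead of reaching into `uploaded_ids` directly; 2.6.20 passed `file_ids=self.window.controller.files.uploaded_ids`, while 2.6.21 calls `controller.files.reset()` up front and `controller.files.get_ids()` when building `BridgeContext` (both call sites appear in the text.py hunks above). A tiny self-contained round-trip using a stub reduced to the new surface:

```python
class FilesStub:
    """Stand-in for controller.files, reduced to the new accessors."""
    def __init__(self):
        self.uploaded_ids = []

    def reset(self):
        self.uploaded_ids = []

    def get_ids(self) -> list:
        return self.uploaded_ids

files = FilesStub()
files.uploaded_ids.append("file-abc")  # hypothetical uploaded file ID
assert files.get_ids() == ["file-abc"]
files.reset()  # send() calls this up front to clear stale IDs
assert files.get_ids() == []
```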
pygpt_net/controller/idx/idx.py
CHANGED
```diff
@@ -6,12 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #
 
 import datetime
 from typing import Optional
 
+from pygpt_net.core.events import BaseEvent, Event
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.utils import trans
 
@@ -19,7 +20,6 @@ from .common import Common
 from .indexer import Indexer
 from .settings import Settings
 
-
 class Idx:
     def __init__(self, window=None):
         """
@@ -56,6 +56,22 @@ class Idx:
         self.update()
         self.locked = False
 
+    def handle(self, event: BaseEvent):
+        """
+        Handle events
+
+        :param event: BaseEvent: Event to handle
+        """
+        name = event.name
+
+        # on ctx end, update ctx DB index
+        if name == Event.CTX_END:
+            mode = event.data.get("mode", "")
+            if (mode not in self.window.controller.chat.input.no_ctx_idx_modes
+                    and not self.window.controller.agent.legacy.enabled()):
+                self.window.controller.idx.on_ctx_end(event.ctx, mode=mode) # update ctx DB index
+            # disabled in agent mode here to prevent loops, handled in agent flow internally if agent mode
+
     def get_modes_keys(self) -> list:
         """
         Get list of available modes
@@ -342,6 +358,14 @@ class Idx:
         """
         return self.current_idx is not None and self.current_idx != "_"
 
+    def get_current(self) -> str:
+        """
+        Get current index name
+
+        :return: Current index name
+        """
+        return self.current_idx
+
     def is_stopped(self) -> bool:
         """
         Check if indexing is stopped
```
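The `CTX_END` branch in `Idx.handle()` indexes the finished context only when the mode is not excluded and the legacy agent is off (the agent flow indexes internally to avoid loops, per the comment). Condensed into a predicate, with both inputs assumed to come from the controllers referenced in the hunk:

```python
def should_index_on_ctx_end(
    mode: str,
    no_ctx_idx_modes: list,
    legacy_agent_enabled: bool,
) -> bool:
    """True when Idx.handle() should update the ctx DB index on CTX_END."""
    return mode not in no_ctx_idx_modes and not legacy_agent_enabled
```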
|