pygpt-net 2.6.20__py3-none-any.whl → 2.6.22__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registries.
- pygpt_net/CHANGELOG.txt +13 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +3 -1
- pygpt_net/controller/__init__.py +4 -8
- pygpt_net/controller/access/voice.py +2 -2
- pygpt_net/controller/agent/agent.py +130 -2
- pygpt_net/controller/agent/experts.py +93 -96
- pygpt_net/controller/agent/llama.py +2 -1
- pygpt_net/controller/assistant/assistant.py +18 -1
- pygpt_net/controller/assistant/batch.py +2 -3
- pygpt_net/controller/assistant/editor.py +2 -2
- pygpt_net/controller/assistant/files.py +2 -3
- pygpt_net/controller/assistant/store.py +2 -2
- pygpt_net/controller/attachment/attachment.py +17 -1
- pygpt_net/controller/audio/audio.py +2 -2
- pygpt_net/controller/camera/camera.py +15 -7
- pygpt_net/controller/chat/chat.py +2 -2
- pygpt_net/controller/chat/common.py +50 -33
- pygpt_net/controller/chat/image.py +67 -77
- pygpt_net/controller/chat/input.py +94 -166
- pygpt_net/controller/chat/output.py +83 -140
- pygpt_net/controller/chat/response.py +83 -102
- pygpt_net/controller/chat/text.py +116 -149
- pygpt_net/controller/ctx/common.py +2 -1
- pygpt_net/controller/ctx/ctx.py +87 -6
- pygpt_net/controller/files/files.py +13 -1
- pygpt_net/controller/idx/idx.py +26 -2
- pygpt_net/controller/idx/indexer.py +85 -76
- pygpt_net/controller/kernel/reply.py +53 -66
- pygpt_net/controller/kernel/stack.py +16 -16
- pygpt_net/controller/lang/lang.py +52 -34
- pygpt_net/controller/model/importer.py +3 -2
- pygpt_net/controller/model/model.py +62 -3
- pygpt_net/controller/notepad/notepad.py +86 -84
- pygpt_net/controller/plugins/settings.py +3 -4
- pygpt_net/controller/settings/editor.py +4 -4
- pygpt_net/controller/settings/profile.py +105 -124
- pygpt_net/controller/theme/menu.py +154 -57
- pygpt_net/controller/theme/nodes.py +51 -44
- pygpt_net/controller/theme/theme.py +33 -9
- pygpt_net/controller/tools/tools.py +2 -2
- pygpt_net/controller/ui/tabs.py +2 -3
- pygpt_net/controller/ui/ui.py +16 -2
- pygpt_net/core/agents/observer/evaluation.py +3 -3
- pygpt_net/core/agents/provider.py +25 -3
- pygpt_net/core/agents/runner.py +4 -1
- pygpt_net/core/agents/runners/llama_workflow.py +19 -7
- pygpt_net/core/agents/runners/loop.py +3 -1
- pygpt_net/core/agents/runners/openai_workflow.py +17 -3
- pygpt_net/core/agents/tools.py +4 -1
- pygpt_net/core/bridge/context.py +34 -37
- pygpt_net/core/ctx/container.py +13 -12
- pygpt_net/core/ctx/ctx.py +1 -1
- pygpt_net/core/ctx/output.py +7 -4
- pygpt_net/core/db/database.py +2 -2
- pygpt_net/core/debug/console/console.py +2 -2
- pygpt_net/core/debug/debug.py +12 -1
- pygpt_net/core/dispatcher/dispatcher.py +24 -1
- pygpt_net/core/events/app.py +7 -7
- pygpt_net/core/events/control.py +26 -26
- pygpt_net/core/events/event.py +6 -3
- pygpt_net/core/events/kernel.py +2 -2
- pygpt_net/core/events/render.py +13 -13
- pygpt_net/core/experts/experts.py +76 -82
- pygpt_net/core/experts/worker.py +12 -12
- pygpt_net/core/filesystem/actions.py +1 -2
- pygpt_net/core/models/models.py +5 -1
- pygpt_net/core/models/ollama.py +14 -5
- pygpt_net/core/render/plain/helpers.py +2 -5
- pygpt_net/core/render/plain/renderer.py +26 -30
- pygpt_net/core/render/web/body.py +1 -1
- pygpt_net/core/render/web/helpers.py +2 -2
- pygpt_net/core/render/web/renderer.py +4 -4
- pygpt_net/core/settings/settings.py +43 -13
- pygpt_net/core/tabs/tabs.py +20 -13
- pygpt_net/core/types/__init__.py +2 -1
- pygpt_net/core/types/agent.py +4 -4
- pygpt_net/core/types/base.py +19 -0
- pygpt_net/core/types/console.py +6 -6
- pygpt_net/core/types/mode.py +8 -8
- pygpt_net/core/types/multimodal.py +3 -3
- pygpt_net/core/types/openai.py +2 -1
- pygpt_net/data/config/config.json +5 -5
- pygpt_net/data/config/models.json +19 -3
- pygpt_net/data/config/settings.json +14 -14
- pygpt_net/data/locale/locale.de.ini +4 -1
- pygpt_net/data/locale/locale.en.ini +6 -3
- pygpt_net/data/locale/locale.es.ini +4 -1
- pygpt_net/data/locale/locale.fr.ini +4 -1
- pygpt_net/data/locale/locale.it.ini +4 -1
- pygpt_net/data/locale/locale.pl.ini +5 -4
- pygpt_net/data/locale/locale.uk.ini +4 -1
- pygpt_net/data/locale/locale.zh.ini +4 -1
- pygpt_net/item/ctx.py +256 -240
- pygpt_net/item/model.py +59 -116
- pygpt_net/item/preset.py +122 -105
- pygpt_net/plugin/twitter/plugin.py +2 -2
- pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
- pygpt_net/provider/agents/openai/agent.py +4 -12
- pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
- pygpt_net/provider/agents/openai/agent_planner.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
- pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
- pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
- pygpt_net/provider/agents/openai/evolve.py +5 -9
- pygpt_net/provider/agents/openai/supervisor.py +4 -8
- pygpt_net/provider/core/config/patch.py +10 -3
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
- pygpt_net/provider/core/model/patch.py +11 -1
- pygpt_net/provider/core/preset/json_file.py +47 -49
- pygpt_net/provider/gpt/agents/experts.py +2 -2
- pygpt_net/tools/audio_transcriber/ui/dialogs.py +44 -54
- pygpt_net/tools/code_interpreter/body.py +1 -2
- pygpt_net/tools/code_interpreter/tool.py +7 -4
- pygpt_net/tools/code_interpreter/ui/html.py +1 -3
- pygpt_net/tools/code_interpreter/ui/widgets.py +2 -3
- pygpt_net/tools/html_canvas/ui/widgets.py +1 -3
- pygpt_net/tools/image_viewer/ui/dialogs.py +40 -37
- pygpt_net/tools/indexer/ui/widgets.py +2 -4
- pygpt_net/tools/media_player/tool.py +2 -5
- pygpt_net/tools/media_player/ui/widgets.py +60 -36
- pygpt_net/tools/text_editor/ui/widgets.py +18 -19
- pygpt_net/tools/translator/ui/widgets.py +39 -35
- pygpt_net/ui/base/context_menu.py +9 -4
- pygpt_net/ui/dialog/db.py +1 -3
- pygpt_net/ui/dialog/models.py +1 -3
- pygpt_net/ui/dialog/models_importer.py +2 -4
- pygpt_net/ui/dialogs.py +34 -30
- pygpt_net/ui/layout/chat/attachments.py +72 -84
- pygpt_net/ui/layout/chat/attachments_ctx.py +40 -44
- pygpt_net/ui/layout/chat/attachments_uploaded.py +36 -39
- pygpt_net/ui/layout/chat/calendar.py +100 -70
- pygpt_net/ui/layout/chat/chat.py +23 -17
- pygpt_net/ui/layout/chat/input.py +95 -118
- pygpt_net/ui/layout/chat/output.py +100 -162
- pygpt_net/ui/layout/chat/painter.py +89 -61
- pygpt_net/ui/layout/ctx/ctx_list.py +43 -52
- pygpt_net/ui/layout/status.py +23 -14
- pygpt_net/ui/layout/toolbox/agent.py +27 -38
- pygpt_net/ui/layout/toolbox/agent_llama.py +42 -45
- pygpt_net/ui/layout/toolbox/assistants.py +42 -38
- pygpt_net/ui/layout/toolbox/computer_env.py +32 -23
- pygpt_net/ui/layout/toolbox/footer.py +13 -16
- pygpt_net/ui/layout/toolbox/image.py +18 -21
- pygpt_net/ui/layout/toolbox/indexes.py +46 -89
- pygpt_net/ui/layout/toolbox/mode.py +20 -7
- pygpt_net/ui/layout/toolbox/model.py +12 -10
- pygpt_net/ui/layout/toolbox/presets.py +68 -52
- pygpt_net/ui/layout/toolbox/prompt.py +31 -58
- pygpt_net/ui/layout/toolbox/toolbox.py +25 -21
- pygpt_net/ui/layout/toolbox/vision.py +20 -22
- pygpt_net/ui/main.py +2 -4
- pygpt_net/ui/menu/about.py +64 -84
- pygpt_net/ui/menu/audio.py +87 -63
- pygpt_net/ui/menu/config.py +121 -127
- pygpt_net/ui/menu/debug.py +69 -76
- pygpt_net/ui/menu/file.py +32 -35
- pygpt_net/ui/menu/menu.py +2 -3
- pygpt_net/ui/menu/plugins.py +69 -33
- pygpt_net/ui/menu/theme.py +45 -46
- pygpt_net/ui/menu/tools.py +56 -60
- pygpt_net/ui/menu/video.py +20 -25
- pygpt_net/ui/tray.py +1 -2
- pygpt_net/ui/widget/audio/bar.py +1 -3
- pygpt_net/ui/widget/audio/input_button.py +3 -4
- pygpt_net/ui/widget/calendar/select.py +1 -2
- pygpt_net/ui/widget/dialog/base.py +12 -9
- pygpt_net/ui/widget/dialog/editor_file.py +20 -23
- pygpt_net/ui/widget/dialog/find.py +25 -24
- pygpt_net/ui/widget/dialog/profile.py +57 -53
- pygpt_net/ui/widget/draw/painter.py +62 -93
- pygpt_net/ui/widget/element/button.py +42 -30
- pygpt_net/ui/widget/element/checkbox.py +23 -15
- pygpt_net/ui/widget/element/group.py +6 -5
- pygpt_net/ui/widget/element/labels.py +1 -2
- pygpt_net/ui/widget/filesystem/explorer.py +93 -102
- pygpt_net/ui/widget/image/display.py +1 -2
- pygpt_net/ui/widget/lists/assistant.py +1 -2
- pygpt_net/ui/widget/lists/attachment.py +1 -2
- pygpt_net/ui/widget/lists/attachment_ctx.py +1 -2
- pygpt_net/ui/widget/lists/context.py +2 -4
- pygpt_net/ui/widget/lists/index.py +1 -2
- pygpt_net/ui/widget/lists/model.py +1 -2
- pygpt_net/ui/widget/lists/model_editor.py +1 -2
- pygpt_net/ui/widget/lists/model_importer.py +1 -2
- pygpt_net/ui/widget/lists/preset.py +1 -2
- pygpt_net/ui/widget/lists/preset_plugins.py +1 -2
- pygpt_net/ui/widget/lists/profile.py +1 -2
- pygpt_net/ui/widget/lists/uploaded.py +1 -2
- pygpt_net/ui/widget/option/checkbox.py +2 -4
- pygpt_net/ui/widget/option/checkbox_list.py +1 -4
- pygpt_net/ui/widget/option/cmd.py +1 -4
- pygpt_net/ui/widget/option/dictionary.py +25 -28
- pygpt_net/ui/widget/option/input.py +1 -3
- pygpt_net/ui/widget/tabs/Input.py +16 -12
- pygpt_net/ui/widget/tabs/body.py +5 -3
- pygpt_net/ui/widget/tabs/layout.py +36 -25
- pygpt_net/ui/widget/tabs/output.py +96 -74
- pygpt_net/ui/widget/textarea/calendar_note.py +1 -2
- pygpt_net/ui/widget/textarea/editor.py +41 -73
- pygpt_net/ui/widget/textarea/find.py +11 -10
- pygpt_net/ui/widget/textarea/html.py +3 -6
- pygpt_net/ui/widget/textarea/input.py +63 -64
- pygpt_net/ui/widget/textarea/notepad.py +54 -38
- pygpt_net/ui/widget/textarea/output.py +65 -54
- pygpt_net/ui/widget/textarea/search_input.py +5 -4
- pygpt_net/ui/widget/textarea/web.py +2 -4
- pygpt_net/ui/widget/vision/camera.py +2 -31
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.22.dist-info}/METADATA +25 -154
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.22.dist-info}/RECORD +218 -217
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.22.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.22.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.22.dist-info}/entry_points.txt +0 -0
--- a/pygpt_net/controller/chat/input.py
+++ b/pygpt_net/controller/chat/input.py
@@ -6,10 +6,9 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #
 
-import os
 from typing import Optional, Any, Dict
 
 from pygpt_net.core.bridge import BridgeContext
@@ -17,12 +16,8 @@ from pygpt_net.core.bridge.context import MultimodalContext
 from pygpt_net.core.events import Event, AppEvent, KernelEvent, RenderEvent
 from pygpt_net.core.types import (
     MODE_AGENT,
-    MODE_AGENT_LLAMA,
-    MODE_AGENT_OPENAI,
-    MODE_LLAMA_INDEX,
     MODE_ASSISTANT,
     MODE_IMAGE,
-    MODE_CHAT,
 )
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.utils import trans
@@ -56,110 +51,51 @@ class Input:
 
         :param force: force send
         """
-
-
-
-
-
+        dispatch = self.window.dispatch
+        mode = self.window.core.config.get('mode')
+        event = Event(Event.INPUT_BEGIN, {
+            'mode': mode,
+            'force': force,
+            'stop': False,
+        })
+        dispatch(event)
+        stop = event.data.get('stop', False)
 
         # get text from input
         text = self.window.ui.nodes['input'].toPlainText().strip()
-        mode = self.window.core.config.get('mode')
 
         if not force:
-
-
-            # check if not in edit mode
-            if self.window.controller.ctx.extra.is_editing():
-                self.window.controller.ctx.extra.edit_submit()
+            dispatch(AppEvent(AppEvent.INPUT_SENT))  # app event
+            if stop:
                 return
 
-            # if agent mode: iterations check, show alert confirm if infinity loop
-            if self.window.controller.agent.common.is_infinity_loop(mode):
-                self.window.controller.agent.common.display_infinity_loop_confirm()
-                return
-
-            # check for agent is selected
-            if mode in [MODE_AGENT_OPENAI, MODE_AGENT_LLAMA]:
-                preset = self.window.controller.presets.get_current()
-                if not preset or preset.name == "*":
-                    self.window.ui.dialogs.alert(
-                        trans("dialog.agent.not_selected"))
-                    return
-
-            # check ollama model
-            model = self.window.core.config.get('model')
-            if model:
-                model_data = self.window.core.models.get(model)
-                if model_data is not None and model_data.is_ollama():
-                    if (mode == MODE_LLAMA_INDEX or
-                            (mode == MODE_CHAT and not model_data.is_openai_supported() and model_data.is_ollama())):
-                        model_id = model_data.get_ollama_model()
-                        # load ENV vars first
-                        if ('env' in model_data.llama_index
-                                and model_data.llama_index['env'] is not None):
-                            for item in model_data.llama_index['env']:
-                                key = item.get('name', '').strip()
-                                value = item.get('value', '').strip()
-                                os.environ[key] = value
-                        status = self.window.core.models.ollama.check_model(model_id)
-                        is_installed = status.get('is_installed', False)
-                        is_model = status.get('is_model', False)
-                        if not is_installed:
-                            self.window.ui.dialogs.alert(trans("dialog.ollama.not_installed"))
-                            return
-                        if not is_model:
-                            self.window.ui.dialogs.alert(
-                                trans("dialog.ollama.model_not_found").replace("{model}", model_id))
-                            return
-
         # listen for stop command
         if self.generating \
                 and text is not None \
                 and text.lower().strip() in self.stop_commands:
             self.window.controller.kernel.stop()  # TODO: to chat main
-
+            dispatch(RenderEvent(RenderEvent.CLEAR_INPUT))
             return
 
-        # agent modes
-        if mode == MODE_AGENT:
-            self.window.controller.agent.legacy.on_user_send(text)  # begin Legacy (autonomous) agent flow
-        elif mode in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]:
-            self.window.controller.agent.llama.on_user_send(text)  # begin LlamaIndex adn OpenAI agent flow
-
         # event: user input send (manually)
         event = Event(Event.USER_SEND, {
+            'mode': mode,
             'value': text,
         })
-
-            'value': text,
-        }))
+        dispatch(event)
        text = event.data['value']
 
-        #
-        if
-
-                "id": "chat",
-                "msg": "Reading attachments..."
-            }))
-            try:
-                self.window.controller.chat.attachment.handle(mode, text)
-                return  # return here, will be handled in signal
-            except Exception as e:
-                self.window.dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
-                    "id": "chat",
-                    "msg": f"Error reading attachments: {str(e)}"
-                }))
-                return
+        # if attachments, return here - send will be handled via signal after upload
+        if self.handle_attachment(mode, text):
+            return
 
-        # event: handle input
+        # kernel event: handle input
         context = BridgeContext()
         context.prompt = text
-
+        dispatch(KernelEvent(KernelEvent.INPUT_USER, {
             'context': context,
             'extra': {},
-        })
-        self.window.dispatch(event)
+        }))
 
     def send(
             self,
@@ -172,21 +108,13 @@ class Input:
         :param context: bridge context
         :param extra: extra data
         """
-        text = str(context.prompt)
-        prev_ctx = context.ctx
-        force = extra.get("force", False)
-        reply = extra.get("reply", False)
-        internal = extra.get("internal", False)
-        parent_id = extra.get("parent_id", None)
-        multimodal_ctx = context.multimodal_ctx
         self.execute(
-            text=
-            force=force,
-            reply=reply,
-            internal=internal,
-            prev_ctx=
-
-            multimodal_ctx=multimodal_ctx,
+            text=str(context.prompt),
+            force=extra.get("force", False),
+            reply=extra.get("reply", False),
+            internal=extra.get("internal", False),
+            prev_ctx=context.ctx,
+            multimodal_ctx=context.multimodal_ctx,
        )
 
     def execute(
@@ -196,7 +124,6 @@ class Input:
             reply: bool = False,
             internal: bool = False,
             prev_ctx: Optional[CtxItem] = None,
-            parent_id: Optional[int] = None,
             multimodal_ctx: Optional[MultimodalContext] = None,
     ):
         """
@@ -207,10 +134,14 @@ class Input:
         :param reply: reply mode (from plugins)
         :param internal: internal call
         :param prev_ctx: previous context (if reply)
-        :param parent_id: parent id (if expert)
         :param multimodal_ctx: multimodal context
         """
-        self.window.
+        core = self.window.core
+        controller = self.window.controller
+        dispatch = self.window.dispatch
+        log = controller.chat.log
+
+        dispatch(KernelEvent(KernelEvent.STATE_IDLE, {
             "id": "chat",
         }))
 
@@ -218,106 +149,103 @@ class Input:
         if self.locked and not force and not internal:
             return
 
-
+        log("Begin.")
         self.generating = True  # set generating flag
 
-
+        # check if assistant is selected
+        mode = core.config.get('mode')
         if mode == MODE_ASSISTANT:
-
-            if self.window.core.config.get('assistant') is None \
-                    or self.window.core.config.get('assistant') == "":
-                self.window.ui.dialogs.alert(trans('error.assistant_not_selected'))
+            if not controller.assistant.check():
                 self.generating = False  # unlock
                 return
-        elif self.window.controller.ui.vision.has_vision():
-            # handle auto capture
-            self.window.controller.camera.handle_auto_capture()
 
-        #
-
-        self.window.controller.kernel.resume()
+        # handle camera capture
+        controller.camera.handle_auto_capture(mode)
 
-
+        # unlock if locked
+        controller.assistant.resume()
+        controller.kernel.resume()
 
-        #
-        if mode == MODE_AGENT:
-            self.log(f"Agent: input before: {text}")
-            text = self.window.controller.agent.legacy.on_input_before(text)
+        log(f"Input prompt: {text}")  # log
 
-        # event: before input
+        # event: before input handle
         event = Event(Event.INPUT_BEFORE, {
-            'value': text,
             'mode': mode,
+            'value': text,
+            'multimodal_ctx': multimodal_ctx,
+            'stop': False,
+            'silent': False,  # silent mode (without error messages)
         })
-
+        dispatch(event)
         text = event.data['value']
+        stop = event.data.get('stop', False)
+        silent = event.data.get('silent', False)
 
-
-
-
-
-        # allow empty text input only if multimodal data, otherwise abort
-        is_audio = multimodal_ctx is not None and multimodal_ctx.is_audio_input
-        if len(text.strip()) == 0 and (not camera_captured and not is_audio):
-            self.generating = False  # unlock as not generating
-            return
-
-        # check API key, show monit if no API key for current provider
-        model = self.window.core.config.get('model')
-        if model:
-            model_data = self.window.core.models.get(model)
-            if not self.window.controller.chat.common.check_api_key(mode=mode, model=model_data, monit=False):
-                self.window.controller.chat.common.check_api_key(mode=mode, model=model_data, monit=True)
-                self.generating = False
-                self.window.dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
+        if stop:  # abort via event
+            self.generating = False
+            if not silent:
+                dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
                     "id": "chat",
                 }))
-
+            return
 
         # set state to: busy
-
+        dispatch(KernelEvent(KernelEvent.STATE_BUSY, {
             "id": "chat",
             "msg": trans('status.sending'),
         }))
 
         # clear input field if clear-on-send is enabled
-        if
-
-
-        # prepare ctx, create new ctx meta if there is no ctx, or no ctx selected
-        if self.window.core.ctx.count_meta() == 0 or self.window.core.ctx.get_current() is None:
-            self.window.core.ctx.new()
-            self.window.controller.ctx.update()
-            self.log("New context created...")  # log
-        else:
-            # check if current ctx is allowed for this mode - if not, then auto-create new ctx
-            self.window.controller.ctx.handle_allowed(mode)
+        if core.config.get('send_clear') and not force and not internal:
+            dispatch(RenderEvent(RenderEvent.CLEAR_INPUT))
 
-        #
-
+        # create ctx, handle allowed, etc.
+        dispatch(Event(Event.INPUT_ACCEPT, {
+            'value': text,
+            'multimodal_ctx': multimodal_ctx,
+            'mode': mode,
+        }))
 
         # send input to API
         if mode == MODE_IMAGE:
-
+            controller.chat.image.send(
                 text=text,
                 prev_ctx=prev_ctx,
-
-            )  # image mode
+            )  # image generation
         else:
-
-            self.window.controller.chat.text.send(
+            controller.chat.text.send(
                 text=text,
                 reply=reply,
                 internal=internal,
                 prev_ctx=prev_ctx,
-                parent_id=parent_id,
                 multimodal_ctx=multimodal_ctx,
-            )  # text mode: OpenAI,
+            )  # text mode: OpenAI, LlamaIndex, etc.
 
-    def
+    def handle_attachment(self, mode: str, text: str) -> bool:
         """
-
+        Handle attachments with additional context (not images here)
 
-        :param
+        :param mode: Mode (e.g., MODE_ASSISTANT, MODE_CHAT)
+        :param text: Input text
+        :return: bool: True if attachments exists, False otherwise
         """
-        self.window.controller
+        controller = self.window.controller
+        dispatch = self.window.dispatch
+        exists = False
+
+        # handle attachments with additional context (not images here)
+        if mode != MODE_ASSISTANT and controller.chat.attachment.has(mode):
+            exists = True
+            dispatch(KernelEvent(KernelEvent.STATE_BUSY, {
+                "id": "chat",
+                "msg": "Reading attachments..."
+            }))
            try:
                controller.chat.attachment.handle(mode, text)
            except Exception as e:
                dispatch(KernelEvent(KernelEvent.STATE_ERROR, {
                    "id": "chat",
                    "msg": f"Error reading attachments: {e}"
                }))

        return exists