pygpt-net 2.4.28__py3-none-any.whl → 2.4.34__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +40 -0
- README.md +62 -5
- pygpt_net/CHANGELOG.txt +40 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/access/__init__.py +5 -5
- pygpt_net/controller/access/control.py +3 -2
- pygpt_net/controller/attachment.py +67 -1
- pygpt_net/controller/audio/__init__.py +34 -6
- pygpt_net/controller/chat/__init__.py +3 -1
- pygpt_net/controller/chat/attachment.py +239 -37
- pygpt_net/controller/chat/audio.py +99 -0
- pygpt_net/controller/chat/input.py +10 -3
- pygpt_net/controller/chat/output.py +4 -1
- pygpt_net/controller/chat/text.py +10 -5
- pygpt_net/controller/dialogs/confirm.py +17 -1
- pygpt_net/controller/kernel/reply.py +5 -8
- pygpt_net/controller/lang/custom.py +3 -1
- pygpt_net/controller/mode.py +2 -1
- pygpt_net/controller/presets/editor.py +11 -2
- pygpt_net/core/access/voice.py +2 -2
- pygpt_net/core/agents/legacy.py +3 -1
- pygpt_net/core/attachments/__init__.py +11 -7
- pygpt_net/core/attachments/context.py +226 -44
- pygpt_net/core/{audio.py → audio/__init__.py} +1 -1
- pygpt_net/core/audio/context.py +34 -0
- pygpt_net/core/bridge/context.py +29 -1
- pygpt_net/core/bridge/worker.py +16 -1
- pygpt_net/core/ctx/__init__.py +4 -1
- pygpt_net/core/db/__init__.py +4 -2
- pygpt_net/core/debug/attachments.py +3 -1
- pygpt_net/core/debug/context.py +5 -1
- pygpt_net/core/debug/presets.py +3 -1
- pygpt_net/core/docker/__init__.py +170 -16
- pygpt_net/core/docker/builder.py +6 -2
- pygpt_net/core/events/event.py +3 -1
- pygpt_net/core/experts/__init__.py +24 -6
- pygpt_net/core/idx/chat.py +55 -4
- pygpt_net/core/idx/indexing.py +123 -15
- pygpt_net/core/modes.py +3 -1
- pygpt_net/core/presets.py +13 -2
- pygpt_net/core/render/markdown/pid.py +2 -1
- pygpt_net/core/render/plain/pid.py +2 -1
- pygpt_net/core/render/web/body.py +34 -12
- pygpt_net/core/render/web/pid.py +2 -1
- pygpt_net/core/render/web/renderer.py +12 -3
- pygpt_net/core/tokens.py +4 -2
- pygpt_net/core/types/mode.py +2 -1
- pygpt_net/data/config/config.json +7 -4
- pygpt_net/data/config/models.json +191 -6
- pygpt_net/data/config/modes.json +11 -5
- pygpt_net/data/config/presets/current.audio.json +34 -0
- pygpt_net/data/config/settings.json +15 -1
- pygpt_net/data/css/web.css +70 -0
- pygpt_net/data/css/web.dark.css +4 -1
- pygpt_net/data/css/web.light.css +1 -1
- pygpt_net/data/locale/locale.de.ini +33 -20
- pygpt_net/data/locale/locale.en.ini +73 -58
- pygpt_net/data/locale/locale.es.ini +33 -20
- pygpt_net/data/locale/locale.fr.ini +35 -22
- pygpt_net/data/locale/locale.it.ini +33 -20
- pygpt_net/data/locale/locale.pl.ini +36 -23
- pygpt_net/data/locale/locale.uk.ini +33 -20
- pygpt_net/data/locale/locale.zh.ini +40 -27
- pygpt_net/data/locale/plugin.cmd_code_interpreter.de.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.en.ini +15 -7
- pygpt_net/data/locale/plugin.cmd_code_interpreter.es.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.fr.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.it.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.pl.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.uk.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.zh.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_files.de.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.en.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.es.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.fr.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.it.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.pl.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.uk.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.zh.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_system.de.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.en.ini +12 -6
- pygpt_net/data/locale/plugin.cmd_system.es.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.fr.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.it.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.pl.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.uk.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.zh.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_web.de.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.en.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.es.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.fr.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.it.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.pl.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.uk.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.zh.ini +5 -5
- pygpt_net/data/locale/plugin.idx_llama_index.de.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.en.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.es.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.fr.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.it.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.pl.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.uk.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.zh.ini +12 -12
- pygpt_net/item/attachment.py +9 -1
- pygpt_net/item/ctx.py +9 -1
- pygpt_net/item/preset.py +5 -1
- pygpt_net/launcher.py +3 -1
- pygpt_net/migrations/Version20241126170000.py +28 -0
- pygpt_net/migrations/__init__.py +3 -1
- pygpt_net/plugin/audio_input/__init__.py +11 -1
- pygpt_net/plugin/audio_input/worker.py +9 -1
- pygpt_net/plugin/audio_output/__init__.py +37 -7
- pygpt_net/plugin/audio_output/worker.py +38 -41
- pygpt_net/plugin/cmd_code_interpreter/__init__.py +51 -35
- pygpt_net/plugin/cmd_code_interpreter/builder.py +16 -4
- pygpt_net/plugin/cmd_code_interpreter/config.py +98 -39
- pygpt_net/plugin/cmd_code_interpreter/docker.py +4 -0
- pygpt_net/plugin/cmd_code_interpreter/ipython/__init__.py +13 -0
- pygpt_net/plugin/cmd_code_interpreter/{ipython.py → ipython/docker_kernel.py} +10 -3
- pygpt_net/plugin/cmd_code_interpreter/ipython/local_kernel.py +220 -0
- pygpt_net/plugin/cmd_code_interpreter/runner.py +5 -5
- pygpt_net/plugin/cmd_mouse_control/__init__.py +4 -2
- pygpt_net/plugin/cmd_system/config.py +50 -0
- pygpt_net/plugin/cmd_system/docker.py +4 -0
- pygpt_net/plugin/idx_llama_index/__init__.py +23 -1
- pygpt_net/plugin/idx_llama_index/worker.py +10 -0
- pygpt_net/plugin/openai_dalle/__init__.py +3 -1
- pygpt_net/plugin/openai_vision/__init__.py +3 -1
- pygpt_net/provider/core/attachment/json_file.py +4 -1
- pygpt_net/provider/core/config/patch.py +25 -0
- pygpt_net/provider/core/ctx/db_sqlite/storage.py +14 -4
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +19 -2
- pygpt_net/provider/core/model/patch.py +7 -1
- pygpt_net/provider/core/preset/json_file.py +5 -1
- pygpt_net/provider/gpt/__init__.py +14 -2
- pygpt_net/provider/gpt/audio.py +63 -0
- pygpt_net/provider/gpt/chat.py +76 -44
- pygpt_net/provider/gpt/utils.py +27 -0
- pygpt_net/provider/gpt/vision.py +37 -15
- pygpt_net/provider/loaders/base.py +10 -1
- pygpt_net/provider/loaders/web_yt.py +19 -1
- pygpt_net/tools/code_interpreter/__init__.py +1 -0
- pygpt_net/tools/image_viewer/ui/dialogs.py +3 -1
- pygpt_net/ui/dialog/preset.py +3 -1
- pygpt_net/ui/dialog/url.py +29 -0
- pygpt_net/ui/dialogs.py +5 -1
- pygpt_net/ui/layout/chat/attachments.py +42 -6
- pygpt_net/ui/layout/chat/attachments_ctx.py +14 -4
- pygpt_net/ui/layout/chat/attachments_uploaded.py +8 -4
- pygpt_net/ui/layout/toolbox/agent.py +8 -7
- pygpt_net/ui/layout/toolbox/agent_llama.py +5 -4
- pygpt_net/ui/layout/toolbox/prompt.py +8 -6
- pygpt_net/ui/menu/tools.py +17 -11
- pygpt_net/ui/widget/anims/toggles.py +167 -0
- pygpt_net/ui/widget/dialog/url.py +59 -0
- pygpt_net/ui/widget/element/group.py +2 -1
- pygpt_net/ui/widget/lists/attachment.py +22 -17
- pygpt_net/ui/widget/lists/attachment_ctx.py +65 -3
- pygpt_net/ui/widget/option/checkbox.py +69 -5
- pygpt_net/ui/widget/option/cmd.py +4 -5
- pygpt_net/ui/widget/option/toggle.py +62 -0
- pygpt_net/ui/widget/option/toggle_label.py +79 -0
- pygpt_net/ui/widget/textarea/url.py +43 -0
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/METADATA +65 -7
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/RECORD +168 -154
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/LICENSE +0 -0
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/WHEEL +0 -0
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/entry_points.txt +0 -0
pygpt_net/core/bridge/worker.py
CHANGED
@@ -16,7 +16,8 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
 )
-from pygpt_net.core.events import KernelEvent
+from pygpt_net.core.events import KernelEvent, Event
+
 
 class BridgeSignals(QObject):
     """Bridge signals"""
@@ -43,6 +44,9 @@ class BridgeWorker(QObject, QRunnable):
         result = False
 
         try:
+            # POST PROMPT ASYNC: handle post prompt async event
+            self.handle_post_prompt_async()
+
             # ADDITIONAL CONTEXT: append additional context from attachments
             self.handle_additional_context()
 
@@ -109,6 +113,17 @@ class BridgeWorker(QObject, QRunnable):
             })
             self.signals.response.emit(event)
 
+    def handle_post_prompt_async(self):
+        """Handle post prompt async event"""
+        event = Event(Event.POST_PROMPT_ASYNC, {
+            'mode': self.context.mode,
+            'reply': self.context.ctx.reply,
+            'value': self.context.system_prompt,
+        })
+        event.ctx = self.context.ctx
+        self.window.dispatch(event)
+        self.context.system_prompt = event.data['value']
+
     def handle_additional_context(self):
         """Append additional context"""
         ctx = self.context.ctx
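The new `handle_post_prompt_async()` dispatches a `POST_PROMPT_ASYNC` event before the bridge call and reads the (possibly modified) system prompt back from `event.data['value']`. Below is a minimal sketch of how a plugin-side handler could consume that event; the `handle()` signature follows the usual pygpt-net plugin convention and is an assumption here, not something this diff adds.

```python
from pygpt_net.core.events import Event

def handle(self, event: Event, *args, **kwargs):
    # sketch only: react to the async post-prompt event dispatched by BridgeWorker
    if event.name == Event.POST_PROMPT_ASYNC:
        # event.data carries 'mode', 'reply' and 'value' (the system prompt)
        prompt = event.data.get('value')
        # the worker reads event.data['value'] back after dispatch,
        # so modifying it here rewrites the system prompt for this call
        event.data['value'] = prompt + "\n\nAnswer concisely."
```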
pygpt_net/core/ctx/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 import copy
@@ -19,6 +19,7 @@ from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
@@ -78,6 +79,7 @@ class Ctx:
             MODE_LLAMA_INDEX,
             MODE_AGENT,
             MODE_EXPERT,
+            MODE_AUDIO,
         ]
         self.allowed_modes = {
             MODE_CHAT: self.all_modes,
@@ -89,6 +91,7 @@
             MODE_LLAMA_INDEX: self.all_modes,
             MODE_AGENT: self.all_modes,
             MODE_EXPERT: self.all_modes,
+            MODE_AUDIO: self.all_modes,
             MODE_AGENT_LLAMA: [MODE_AGENT_LLAMA],
         }
         self.current_sys_prompt = ""
pygpt_net/core/db/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 import os
@@ -82,6 +82,8 @@ class Database:
             'is_internal',
             'docs_json',
             'external_id',
+            'audio_id',
+            'audio_expires_ts',
         ]
         columns["ctx_meta"] = [
             'id',
@@ -218,7 +220,7 @@ class Database:
             'columns': columns["ctx_item"],
             'sort_by': columns["ctx_item"],
             'search_fields': ['id', 'input', 'output', 'input_name', 'output_name', 'meta_id', 'hidden_input', 'hidden_output'],
-            'timestamp_columns': ['input_ts', 'output_ts'],
+            'timestamp_columns': ['input_ts', 'output_ts', 'audio_expires_ts'],
             'json_columns': ['cmds_json', 'results_json', 'urls_json', 'images_json', 'files_json', 'attachments_json', 'docs_json', 'additional_ctx_json'],
             'default_sort': 'id',
             'default_order': 'DESC',
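The new `audio_id` and `audio_expires_ts` columns registered above are created by the new migration in the file list (`pygpt_net/migrations/Version20241126170000.py`). A hypothetical sketch of what that migration boils down to; the column types and the exact migration API are assumptions, not taken from this diff:

```python
# hypothetical sketch; the real statements live in Version20241126170000.py
from sqlalchemy import text

def up(conn):
    # add audio metadata columns to ctx_item (types assumed)
    conn.execute(text("ALTER TABLE ctx_item ADD COLUMN audio_id TEXT;"))
    conn.execute(text("ALTER TABLE ctx_item ADD COLUMN audio_expires_ts INTEGER;"))
```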
pygpt_net/core/debug/attachments.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 02:00:00 #
 # ================================================== #
 
 class AttachmentsDebug:
@@ -36,6 +36,8 @@ class AttachmentsDebug:
             'send': attachment.send,
             'key': key,
             'mode': mode,
+            'type': attachment.type,
+            'consumed': attachment.consumed,
         }
         self.window.core.debug.add(self.id, attachment.name, str(data))
 
pygpt_net/core/debug/context.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #
 import json
 
@@ -45,6 +45,10 @@ class ContextDebug:
         self.window.core.debug.add(self.id, 'CMD (current)', str(self.window.core.ctx.current_cmd))
         self.window.core.debug.add(self.id, 'CMD schema (current)', str(self.window.core.ctx.current_cmd_schema))
         self.window.core.debug.add(self.id, 'FUNCTIONS (current)', str(self.get_functions()))
+        self.window.core.debug.add(self.id, 'Attachments: last used content',
+                                   str(self.window.core.attachments.context.last_used_content))
+        self.window.core.debug.add(self.id, 'Attachments: last used context',
+                                   str(self.window.core.attachments.context.last_used_context))
 
         current = None
         if self.window.core.ctx.get_current() is not None:
pygpt_net/core/debug/presets.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 import os
@@ -15,6 +15,7 @@ from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
@@ -64,6 +65,7 @@ class PresetsDebug:
             MODE_AGENT: preset.agent,
             MODE_AGENT_LLAMA: preset.agent_llama,
             MODE_EXPERT: preset.expert,
+            MODE_AUDIO: preset.audio,
             'temperature': preset.temperature,
             'version': preset.version,
         }
pygpt_net/core/docker/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.24 22:00:00 #
 # ================================================== #
 
 import os
@@ -136,8 +136,10 @@
         :param name: Container name.
         """
         client = self.get_docker_client()
-        local_data_dir = self.get_local_data_dir()
         image_name = self.get_image_name()
+        entrypoint = self.get_entrypoint()
+        volumes = self.get_volumes()
+        ports = self.get_ports()
 
         try:
             container = client.containers.get(name)
@@ -150,15 +152,11 @@
             container = client.containers.create(
                 image=image_name,
                 name=name,
-                volumes={
-                    local_data_dir: {
-                        'bind': '/data',
-                        'mode': 'rw',
-                    }
-                },
+                volumes=volumes,
+                ports=ports,
                 tty=True,
                 stdin_open=True,
-                command=
+                command=entrypoint,
             )
             container.start()
         except docker.errors.NotFound:
@@ -166,20 +164,176 @@
             container = client.containers.create(
                 image=image_name,
                 name=name,
-                volumes={
-                    local_data_dir: {
-                        'bind': '/data',
-                        'mode': 'rw',
-                    }
-                },
+                volumes=volumes,
+                ports=ports,
                 tty=True,
                 stdin_open=True,
-                command=
+                command=entrypoint,
             )
             container.start()
         except Exception as e:
             self.log(f"Error creating container: {e}")
 
+    def restart_container(self, name: str):
+        """
+        Restart the Docker container.
+
+        :param name: Container name.
+        """
+        client = self.get_docker_client()
+        image_name = self.get_image_name()
+        entrypoint = self.get_entrypoint()
+        volumes = self.get_volumes()
+        ports = self.get_ports()
+
+        try:
+            container = client.containers.get(name)
+            container.reload()
+            status = container.status
+            print(f"Container '{name}' status: {status}")
+
+            if status == 'running':
+                print(f"Stopping and starting container '{name}'...")
+                container.stop()
+                container.wait()
+                container.reload()
+
+            elif status == 'paused':
+                print(f"Resuming and starting container '{name}'...")
+                container.unpause()
+                container.stop()
+                container.wait()
+                container.reload()
+
+            elif status in ['exited', 'created']:
+                print(f"Container '{name}' is in state '{status}'. Starting it.")
+
+            elif status == 'restarting':
+                print(f"Container '{name}' is restarting. Waiting...")
+                container.wait()
+                container.reload()
+
+            elif status == 'removing':
+                print(f"Container '{name}' is being removed. Waiting...")
+                container.wait()
+                container = None
+
+            elif status == 'dead':
+                print(f"Container '{name}' is dead. Removing and creating a new one.")
+                container.remove()
+                container = None
+
+            else:
+                print(f"Unknown container status: {status}. Removing and creating a new one.")
+                container.remove()
+                container = None
+
+            if container:
+                print(f"Starting container '{name}'...")
+                try:
+                    container.start()
+                    container.reload()
+                    if container.status != 'running':
+                        print(f"Container '{name}' did not start correctly. Status: {container.status}")
+                        print(f"Removing and creating a new container '{name}'...")
+                        container.remove()
+                        container = None
+                except Exception as e:
+                    print(f"Error starting container '{name}': {e}")
+                    print(f"Removing and creating a new container '{name}'...")
+                    container.remove()
+                    container = None
+
+            if not container:
+                print(f"Creating a new container '{name}'...")
+                container = client.containers.create(
+                    image=image_name,
+                    name=name,
+                    volumes=volumes,
+                    ports=ports,
+                    tty=True,
+                    stdin_open=True,
+                    command=entrypoint,  # 'running'
+                )
+                container.start()
+                container.reload()
+                if container.status != 'running':
+                    print(f"Container '{name}' did not start correctly. Status: {container.status}")
+                else:
+                    print(f"Container '{name}' started successfully.")
+
+        except docker.errors.NotFound:
+            print(f"Container '{name}' not found. Creating a new one.")
+            container = client.containers.create(
+                image=image_name,
+                name=name,
+                volumes=volumes,
+                ports=ports,
+                tty=True,
+                stdin_open=True,
+                command=entrypoint,  # 'running'
+            )
+            container.start()
+            container.reload()
+            if container.status != 'running':
+                print(f"Container '{name}' did not start correctly. Status: {container.status}")
+            else:
+                print(f"Container '{name}' started successfully.")
+        except Exception as e:
+            print(f"Error restarting container '{name}': {e}")
+
+    def restart(self):
+        """Restart the Docker container."""
+        self.restart_container(self.get_container_name())
+
+    def get_volumes(self) -> dict:
+        """
+        Get the volumes mappings.
+
+        :return: Volumes mappings.
+        """
+        workdir = self.get_local_data_dir()
+        config = self.plugin.get_option_value('docker_volumes')
+        data = {}
+        for item in config:
+            if item['enabled']:
+                host_dir = item['host'].format(workdir=workdir)
+                data[host_dir] = {
+                    'bind': item['docker'],
+                    'mode': 'rw',
+                }
+        return data
+
+    def get_ports(self) -> dict:
+        """
+        Get the ports mappings.
+
+        :return: Ports mappings.
+        """
+        config = self.plugin.get_option_value('docker_ports')
+        data = {}
+        for item in config:
+            if item['enabled']:
+                docker_port = item['docker']
+                try:
+                    host_port = int(item['host'])
+                except ValueError:
+                    print("WARNING: Invalid host port number: {}. "
+                          "Please provide a valid port number as integer value".format(item['host']))
+                    continue
+                if "/" not in docker_port:
+                    docker_port = f"{docker_port}/tcp"
+                data[docker_port] = host_port
+        return data
+
+    def get_entrypoint(self) -> str:
+        """
+        Get the Docker entrypoint.
+
+        :return: Docker entrypoint command.
+        """
+        return self.plugin.get_option_value('docker_entrypoint')
+
     def execute(self, cmd: str) -> bytes or None:
         """
         Execute command in Docker container.
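The new `get_volumes()` / `get_ports()` helpers replace the hard-coded `/data` bind mount: they read the `docker_volumes` and `docker_ports` plugin options and build the mapping dicts that docker-py expects. An illustrative sketch with assumed option values (the `workdir` path is only an example of what `get_local_data_dir()` returns):

```python
# assumed example option values; the option names come from the diff above
docker_volumes = [{'enabled': True, 'host': '{workdir}/data', 'docker': '/data'}]
docker_ports = [{'enabled': True, 'host': '8888', 'docker': '8888'}]
workdir = '/home/user/.config/pygpt-net'  # example result of get_local_data_dir()

volumes = {
    item['host'].format(workdir=workdir): {'bind': item['docker'], 'mode': 'rw'}
    for item in docker_volumes if item['enabled']
}
ports = {
    (item['docker'] if '/' in item['docker'] else f"{item['docker']}/tcp"): int(item['host'])
    for item in docker_ports if item['enabled']
}
# volumes -> {'/home/user/.config/pygpt-net/data': {'bind': '/data', 'mode': 'rw'}}
# ports   -> {'8888/tcp': 8888}
# both dicts are passed straight to client.containers.create(volumes=..., ports=...)
```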
pygpt_net/core/docker/builder.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.24 22:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import Signal, Slot, QObject
@@ -24,12 +24,13 @@ class Builder(QObject):
         self.docker = None
         self.worker = None
 
-    def build_image(self):
+    def build_image(self, restart: bool = False):
         """Run image build"""
         try:
             self.worker = Worker()
             self.worker.plugin = self.plugin
             self.worker.docker = self.docker
+            self.worker.restart = restart
             self.worker.signals.build_finished.connect(self.handle_build_finished)
             self.worker.signals.error.connect(self.handle_build_failed)
             self.plugin.window.threadpool.start(self.worker)
@@ -65,11 +66,14 @@ class Worker(BaseWorker):
         self.kwargs = kwargs
         self.docker = None
         self.plugin = None
+        self.restart = False
 
     @Slot()
     def run(self):
         try:
             self.docker.build_image()
             self.signals.build_finished.emit()
+            if self.restart:
+                self.docker.restart()
         except Exception as e:
             self.signals.error.emit(e)
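With the new `restart` flag, a finished image build can immediately restart the running container via the new `Docker.restart()`. A minimal usage sketch (the `builder` instance name is assumed, not shown in the diff):

```python
# assumed caller; the diff only changes Builder.build_image() itself
builder.build_image(restart=True)  # rebuild the image, then restart the container
```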
pygpt_net/core/events/event.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 import json
@@ -27,6 +27,7 @@ class Event(BaseEvent):
     AUDIO_INPUT_TOGGLE = "audio.input.toggle"
     AUDIO_OUTPUT_STOP = "audio.output.stop"
     AUDIO_OUTPUT_TOGGLE = "audio.output.toggle"
+    AUDIO_PLAYBACK = "audio.playback"
     AUDIO_READ_TEXT = "audio.read_text"
     CMD_EXECUTE = "cmd.execute"
     CMD_INLINE = "cmd.inline"
@@ -48,6 +49,7 @@ class Event(BaseEvent):
     PLUGIN_SETTINGS_CHANGED = "plugin.settings.changed"
     PLUGIN_OPTION_GET = "plugin.option.get"
     POST_PROMPT = "post.prompt"
+    POST_PROMPT_ASYNC = "post.prompt.async"
     PRE_PROMPT = "pre.prompt"
     SYSTEM_PROMPT = "system.prompt"
     TOOL_OUTPUT_RENDER = "tool.output.render"
pygpt_net/core/experts/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 
@@ -18,6 +18,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_AUDIO,
 )
 from pygpt_net.core.bridge.context import BridgeContext
 from pygpt_net.core.events import Event, KernelEvent, RenderEvent
@@ -39,6 +40,7 @@ class Experts:
             MODE_VISION,
             MODE_LANGCHAIN,
             MODE_LLAMA_INDEX,
+            MODE_AUDIO,
         ]
         self.allowed_cmds = ["expert_call"]
 
@@ -60,7 +62,7 @@
 
         :return: True if stopped
         """
-        return self.window.controller.
+        return self.window.controller.kernel.stopped()
 
     def agent_enabled(self) -> bool:
         """
@@ -235,14 +237,20 @@
         """
         Re-send response from commands to master expert
 
+        If command has been called by expert then response for command is send here to parent
+
         :param ctx: context item
         """
         if self.stopped():
             return
 
+        # master meta is here, ctx.meta = MASTER META ID here!
+
         # make copy of ctx for reply, and change input name to expert name
         reply_ctx = CtxItem()
         reply_ctx.from_dict(ctx.to_dict())
+
+        # reply ctx has no meta here!!!!
         reply_ctx.input_name = "Expert"
         reply_ctx.output_name = ""
         reply_ctx.sub_call = True  # this flag is not copied in to_dict
@@ -327,6 +335,7 @@
         ctx.set_input(query, str(ai_name))
         ctx.set_output(None, expert_name)
         ctx.sub_call = True  # mark as sub-call
+        ctx.pid = master_ctx.pid  # copy PID from parent to allow reply
 
         # render: begin
         event = RenderEvent(RenderEvent.BEGIN, {
@@ -380,11 +389,12 @@
         )
         self.window.controller.chat.common.lock_input()  # lock input
         event = KernelEvent(KernelEvent.CALL, {
-            'context': bridge_context,
+            'context': bridge_context,  # call using slave ctx history
             'extra': {},
         })
         self.window.dispatch(event)
         result = event.data.get("response")
+
         if not result:  # abort if bridge call failed
             return
 
@@ -395,26 +405,34 @@
         self.window.core.ctx.update_item(ctx)
 
         ctx.from_previous()  # append previous result if exists
+
+        # tmp switch meta for render purposes
+        ctx.meta = master_ctx.meta
         self.window.controller.chat.output.handle(
             ctx=ctx,
             mode=mode,
             stream_mode=False,
         )
         ctx.clear_reply()  # reset results
+        ctx.meta = slave  # restore before cmd execute
+
         self.window.controller.chat.command.handle(ctx)  # handle cmds
         self.window.controller.kernel.stack.handle()  # handle command queue
 
+        # if command to execute then end here, and reply is returned to reply() above from stack, and ctx.reply = TRUE here
+        #
         ctx.from_previous()  # append previous result again before save
         self.window.core.ctx.update_item(ctx)  # update ctx in DB
 
-        # if commands reply after bridge call, then stop (already handled in dispatcher)
+        # if commands reply after bridge call, then stop (already handled in sync dispatcher)
         if ctx.reply:
            return
 
         # make copy of ctx for reply, and change input name to expert name
         reply_ctx = CtxItem()
-
+
         reply_ctx.from_dict(ctx.to_dict())
+        reply_ctx.meta = master_ctx.meta
 
         # assign expert output
         reply_ctx.output = result
@@ -423,7 +441,7 @@
         reply_ctx.cmds = []  # clear cmds
         reply_ctx.sub_call = True  # this flag is not copied in to_dict
 
-        #
+        # only if no command call, return final result to main
         context = BridgeContext()
         context.ctx = reply_ctx
         context.prompt = "@"+expert_id+" says:\n\n" + str(reply_ctx.output)
pygpt_net/core/idx/chat.py
CHANGED
@@ -419,12 +419,63 @@ class Chat:
             model = self.window.core.models.from_defaults()
         service_context = self.window.core.idx.llm.get_service_context(model=model)
         index = self.storage.get_ctx_idx(path, service_context=service_context)
+
+        # 1. try to retrieve directly from index
+        retriever = index.as_retriever()
+        nodes = retriever.retrieve(query)
+        response = ""
+        for node in nodes:
+            if node.score > 0.5:
+                response = node.text
+                break
+        output = ""
+        if response:
+            output = str(response)
+        else:
+            # 2. try with prepared prompt
+            prompt = """
+            # Task
+            Translate the below user prompt into a suitable, short query for the RAG engine, so it can fetch the context
+            related to the query from the vector database.
+
+            # Important rules
+            1. Edit the user prompt in a way that allows for the best possible result.
+            2. In your response, give me only the reworded query, without any additional information from yourself.
+
+            # User prompt:
+            ```{prompt}```
+            """.format(prompt=query)
+            response_prepare = index.as_query_engine(
+                llm=service_context.llm,
+                streaming=False,
+            ).query(prompt)
+            if response_prepare:
+                # try the final query with prepared prompt
+                final_response = index.as_query_engine(
+                    llm=service_context.llm,
+                    streaming=False,
+                ).query(response_prepare.response)
+                if final_response:
+                    output = str(final_response.response)
+        return output
+
+    def query_retrieval(
+            self,
+            query: str,
+            idx: str,
+            model: ModelItem = None
+    ) -> str:
         """
-
-
-
-
+        Query attachment
+
+        :param query: query
+        :param idx: index id
+        :param model: model
+        :return: response
         """
+        if model is None:
+            model = self.window.core.models.from_defaults()
+        index, service_context = self.get_index(idx, model)
         retriever = index.as_retriever()
         nodes = retriever.retrieve(query)
         response = ""
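The added block implements a two-step retrieval: first try a direct vector lookup and accept the first node scoring above 0.5; if nothing qualifies, ask the LLM to reword the prompt into a short RAG query and run that through the query engine. A condensed sketch of the same flow (the function name and threshold parameter are illustrative; the llama-index calls mirror the diff):

```python
def retrieve_or_rephrase(index, llm, query: str, min_score: float = 0.5) -> str:
    # 1. direct retrieval: accept the first node above the score threshold
    for node in index.as_retriever().retrieve(query):
        if node.score > min_score:
            return node.text
    # 2. fallback: reword the prompt into a short retrieval query, then query again
    engine = index.as_query_engine(llm=llm, streaming=False)
    reworded = engine.query("Rewrite as a short retrieval query: " + query)
    final = engine.query(reworded.response) if reworded else None
    return str(final.response) if final else ""
```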