pygpt-net 2.4.28__py3-none-any.whl → 2.4.34__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +40 -0
- README.md +62 -5
- pygpt_net/CHANGELOG.txt +40 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/access/__init__.py +5 -5
- pygpt_net/controller/access/control.py +3 -2
- pygpt_net/controller/attachment.py +67 -1
- pygpt_net/controller/audio/__init__.py +34 -6
- pygpt_net/controller/chat/__init__.py +3 -1
- pygpt_net/controller/chat/attachment.py +239 -37
- pygpt_net/controller/chat/audio.py +99 -0
- pygpt_net/controller/chat/input.py +10 -3
- pygpt_net/controller/chat/output.py +4 -1
- pygpt_net/controller/chat/text.py +10 -5
- pygpt_net/controller/dialogs/confirm.py +17 -1
- pygpt_net/controller/kernel/reply.py +5 -8
- pygpt_net/controller/lang/custom.py +3 -1
- pygpt_net/controller/mode.py +2 -1
- pygpt_net/controller/presets/editor.py +11 -2
- pygpt_net/core/access/voice.py +2 -2
- pygpt_net/core/agents/legacy.py +3 -1
- pygpt_net/core/attachments/__init__.py +11 -7
- pygpt_net/core/attachments/context.py +226 -44
- pygpt_net/core/{audio.py → audio/__init__.py} +1 -1
- pygpt_net/core/audio/context.py +34 -0
- pygpt_net/core/bridge/context.py +29 -1
- pygpt_net/core/bridge/worker.py +16 -1
- pygpt_net/core/ctx/__init__.py +4 -1
- pygpt_net/core/db/__init__.py +4 -2
- pygpt_net/core/debug/attachments.py +3 -1
- pygpt_net/core/debug/context.py +5 -1
- pygpt_net/core/debug/presets.py +3 -1
- pygpt_net/core/docker/__init__.py +170 -16
- pygpt_net/core/docker/builder.py +6 -2
- pygpt_net/core/events/event.py +3 -1
- pygpt_net/core/experts/__init__.py +24 -6
- pygpt_net/core/idx/chat.py +55 -4
- pygpt_net/core/idx/indexing.py +123 -15
- pygpt_net/core/modes.py +3 -1
- pygpt_net/core/presets.py +13 -2
- pygpt_net/core/render/markdown/pid.py +2 -1
- pygpt_net/core/render/plain/pid.py +2 -1
- pygpt_net/core/render/web/body.py +34 -12
- pygpt_net/core/render/web/pid.py +2 -1
- pygpt_net/core/render/web/renderer.py +12 -3
- pygpt_net/core/tokens.py +4 -2
- pygpt_net/core/types/mode.py +2 -1
- pygpt_net/data/config/config.json +7 -4
- pygpt_net/data/config/models.json +191 -6
- pygpt_net/data/config/modes.json +11 -5
- pygpt_net/data/config/presets/current.audio.json +34 -0
- pygpt_net/data/config/settings.json +15 -1
- pygpt_net/data/css/web.css +70 -0
- pygpt_net/data/css/web.dark.css +4 -1
- pygpt_net/data/css/web.light.css +1 -1
- pygpt_net/data/locale/locale.de.ini +33 -20
- pygpt_net/data/locale/locale.en.ini +73 -58
- pygpt_net/data/locale/locale.es.ini +33 -20
- pygpt_net/data/locale/locale.fr.ini +35 -22
- pygpt_net/data/locale/locale.it.ini +33 -20
- pygpt_net/data/locale/locale.pl.ini +36 -23
- pygpt_net/data/locale/locale.uk.ini +33 -20
- pygpt_net/data/locale/locale.zh.ini +40 -27
- pygpt_net/data/locale/plugin.cmd_code_interpreter.de.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.en.ini +15 -7
- pygpt_net/data/locale/plugin.cmd_code_interpreter.es.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.fr.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.it.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.pl.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.uk.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_code_interpreter.zh.ini +6 -0
- pygpt_net/data/locale/plugin.cmd_files.de.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.en.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.es.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.fr.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.it.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.pl.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.uk.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.zh.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_system.de.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.en.ini +12 -6
- pygpt_net/data/locale/plugin.cmd_system.es.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.fr.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.it.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.pl.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.uk.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_system.zh.ini +6 -6
- pygpt_net/data/locale/plugin.cmd_web.de.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.en.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.es.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.fr.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.it.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.pl.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.uk.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.zh.ini +5 -5
- pygpt_net/data/locale/plugin.idx_llama_index.de.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.en.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.es.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.fr.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.it.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.pl.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.uk.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.zh.ini +12 -12
- pygpt_net/item/attachment.py +9 -1
- pygpt_net/item/ctx.py +9 -1
- pygpt_net/item/preset.py +5 -1
- pygpt_net/launcher.py +3 -1
- pygpt_net/migrations/Version20241126170000.py +28 -0
- pygpt_net/migrations/__init__.py +3 -1
- pygpt_net/plugin/audio_input/__init__.py +11 -1
- pygpt_net/plugin/audio_input/worker.py +9 -1
- pygpt_net/plugin/audio_output/__init__.py +37 -7
- pygpt_net/plugin/audio_output/worker.py +38 -41
- pygpt_net/plugin/cmd_code_interpreter/__init__.py +51 -35
- pygpt_net/plugin/cmd_code_interpreter/builder.py +16 -4
- pygpt_net/plugin/cmd_code_interpreter/config.py +98 -39
- pygpt_net/plugin/cmd_code_interpreter/docker.py +4 -0
- pygpt_net/plugin/cmd_code_interpreter/ipython/__init__.py +13 -0
- pygpt_net/plugin/cmd_code_interpreter/{ipython.py → ipython/docker_kernel.py} +10 -3
- pygpt_net/plugin/cmd_code_interpreter/ipython/local_kernel.py +220 -0
- pygpt_net/plugin/cmd_code_interpreter/runner.py +5 -5
- pygpt_net/plugin/cmd_mouse_control/__init__.py +4 -2
- pygpt_net/plugin/cmd_system/config.py +50 -0
- pygpt_net/plugin/cmd_system/docker.py +4 -0
- pygpt_net/plugin/idx_llama_index/__init__.py +23 -1
- pygpt_net/plugin/idx_llama_index/worker.py +10 -0
- pygpt_net/plugin/openai_dalle/__init__.py +3 -1
- pygpt_net/plugin/openai_vision/__init__.py +3 -1
- pygpt_net/provider/core/attachment/json_file.py +4 -1
- pygpt_net/provider/core/config/patch.py +25 -0
- pygpt_net/provider/core/ctx/db_sqlite/storage.py +14 -4
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +19 -2
- pygpt_net/provider/core/model/patch.py +7 -1
- pygpt_net/provider/core/preset/json_file.py +5 -1
- pygpt_net/provider/gpt/__init__.py +14 -2
- pygpt_net/provider/gpt/audio.py +63 -0
- pygpt_net/provider/gpt/chat.py +76 -44
- pygpt_net/provider/gpt/utils.py +27 -0
- pygpt_net/provider/gpt/vision.py +37 -15
- pygpt_net/provider/loaders/base.py +10 -1
- pygpt_net/provider/loaders/web_yt.py +19 -1
- pygpt_net/tools/code_interpreter/__init__.py +1 -0
- pygpt_net/tools/image_viewer/ui/dialogs.py +3 -1
- pygpt_net/ui/dialog/preset.py +3 -1
- pygpt_net/ui/dialog/url.py +29 -0
- pygpt_net/ui/dialogs.py +5 -1
- pygpt_net/ui/layout/chat/attachments.py +42 -6
- pygpt_net/ui/layout/chat/attachments_ctx.py +14 -4
- pygpt_net/ui/layout/chat/attachments_uploaded.py +8 -4
- pygpt_net/ui/layout/toolbox/agent.py +8 -7
- pygpt_net/ui/layout/toolbox/agent_llama.py +5 -4
- pygpt_net/ui/layout/toolbox/prompt.py +8 -6
- pygpt_net/ui/menu/tools.py +17 -11
- pygpt_net/ui/widget/anims/toggles.py +167 -0
- pygpt_net/ui/widget/dialog/url.py +59 -0
- pygpt_net/ui/widget/element/group.py +2 -1
- pygpt_net/ui/widget/lists/attachment.py +22 -17
- pygpt_net/ui/widget/lists/attachment_ctx.py +65 -3
- pygpt_net/ui/widget/option/checkbox.py +69 -5
- pygpt_net/ui/widget/option/cmd.py +4 -5
- pygpt_net/ui/widget/option/toggle.py +62 -0
- pygpt_net/ui/widget/option/toggle_label.py +79 -0
- pygpt_net/ui/widget/textarea/url.py +43 -0
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/METADATA +65 -7
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/RECORD +168 -154
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/LICENSE +0 -0
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/WHEEL +0 -0
- {pygpt_net-2.4.28.dist-info → pygpt_net-2.4.34.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/chat/attachment.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #
 
 import os
@@ -114,52 +114,123 @@ class Attachment(QObject):
         :return: True if uploaded
         """
         self.uploaded = False
+        auto_index = self.window.core.config.get("attachments_auto_index", False)
         attachments = self.window.core.attachments.get_all(mode, only_files=True)
+
         if self.is_verbose() and len(attachments) > 0:
             print("\nUploading attachments...\nWork Mode: {}".format(mode))
+
         for uuid in attachments:
             attachment = attachments[uuid]
-            if
[removed lines 123-138 not preserved in this diff view]
-                                    print("Uploading unpacked from archive: {}".format(path_relative))
-                                item = self.window.core.attachments.context.upload(meta, sub_attachment, prompt)
-                                if item:
-                                    item["path"] = os.path.basename(attachment.path) + "/" + path_relative
-                                    item["size"] = os.path.getsize(path)
-                                    if meta.additional_ctx is None:
-                                        meta.additional_ctx = []
-                                    meta.additional_ctx.append(item)
-                                    self.uploaded = True
-                                sub_attachment.consumed = True
-                attachment.consumed = True
-                self.window.core.filesystem.packer.remove_tmp(tmp_path)  # clean
-            else:
-                item = self.window.core.attachments.context.upload(meta, attachment, prompt)
-                if item:
-                    if meta.additional_ctx is None:
-                        meta.additional_ctx = []
-                    meta.additional_ctx.append(item)
-                    attachment.consumed = True  # allow for deletion
+            if attachment.type == AttachmentItem.TYPE_FILE:
+                result = self.upload_file(
+                    attachment=attachment,
+                    meta=meta,
+                    prompt=prompt,
+                    auto_index=auto_index,
+                )
+                if result:
+                    self.uploaded = True
+            elif attachment.type == AttachmentItem.TYPE_URL:
+                result = self.upload_web(
+                    attachment=attachment,
+                    meta=meta,
+                    prompt=prompt,
+                    auto_index=auto_index,
+                )
+                if result:
                     self.uploaded = True
         if self.uploaded:
             self.window.core.ctx.save(meta.id)  # save meta
         return self.uploaded
 
+    def upload_file(
+            self,
+            attachment: AttachmentItem,
+            meta: CtxMeta,
+            prompt: str,
+            auto_index: bool
+    ) -> bool:
+        """
+        Upload file attachment
+
+        :param attachment: AttachmentItem
+        :param meta: CtxMeta
+        :param prompt: User input prompt
+        :param auto_index: Auto index
+        :return: True if uploaded
+        """
+        uploaded = False
+        if not self.is_allowed(attachment.path):
+            return False
+        if self.window.core.filesystem.packer.is_archive(attachment.path):
+            if self.is_verbose():
+                print("Unpacking archive: {}".format(attachment.path))
+            tmp_path = self.window.core.filesystem.packer.unpack(attachment.path)
+            if tmp_path:
+                for root, dirs, files in os.walk(tmp_path):
+                    for file in files:
+                        path = os.path.join(root, file)
+                        sub_attachment = AttachmentItem()
+                        sub_attachment.path = path
+                        sub_attachment.name = os.path.basename(path)
+                        sub_attachment.consumed = False
+                        path_relative = os.path.relpath(path, tmp_path)
+                        if self.is_allowed(str(path)):
+                            if self.is_verbose():
+                                print("Uploading unpacked from archive: {}".format(path_relative))
+                            item = self.window.core.attachments.context.upload(
+                                meta=meta,
+                                attachment=sub_attachment,
+                                prompt=prompt,
+                                real_path=attachment.path,
+                                auto_index=auto_index,
+                            )
+                            if item:
+                                item["path"] = os.path.basename(attachment.path) + "/" + path_relative
+                                item["size"] = os.path.getsize(path)
+                                if meta.additional_ctx is None:
+                                    meta.additional_ctx = []
+                                meta.additional_ctx.append(item)
+                                uploaded = True
+                            sub_attachment.consumed = True
+                attachment.consumed = True
+                self.window.core.filesystem.packer.remove_tmp(tmp_path)  # clean
+        else:
+            item = self.window.core.attachments.context.upload(
+                meta=meta,
+                attachment=attachment,
+                prompt=prompt,
+                real_path=attachment.path,
+                auto_index=auto_index,
+            )
+            if item:
+                if meta.additional_ctx is None:
+                    meta.additional_ctx = []
+                meta.additional_ctx.append(item)
+                attachment.consumed = True  # allow for deletion
+                uploaded = True
+
+        return uploaded
+
+    def upload_web(
+            self,
+            attachment: AttachmentItem,
+            meta: CtxMeta,
+            prompt: str,
+            auto_index: bool
+    ) -> bool:
+        """
+        Upload web attachment
+
+        :param attachment: AttachmentItem
+        :param meta: CtxMeta
+        :param prompt: User input prompt
+        :param auto_index: Auto index
+        :return: True if uploaded
+        """
+        return self.upload_file(attachment, meta, prompt, auto_index)
+
     def has_context(self, meta: CtxMeta) -> bool:
         """
         Check if has additional context for attachment
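The refactored `upload_file()` above unpacks archive attachments to a temporary directory, walks the extracted tree, and records each member with its archive-relative path and size. Below is a minimal standalone sketch of that unpack-and-walk pattern, using `zipfile`/`tempfile` in place of PyGPT's internal `filesystem.packer`; the helper name and the example archive path are illustrative, not part of the package.

```python
# Standalone sketch of the unpack-and-walk pattern used by upload_file() above.
# zipfile/tempfile stand in for PyGPT's filesystem.packer; names are illustrative only.
import os
import tempfile
import zipfile


def collect_archive_members(archive_path: str) -> list[dict]:
    """Unpack an archive to a temp dir and describe each file, keeping the
    archive-relative path (as upload_file() stores it in item["path"])."""
    items = []
    with tempfile.TemporaryDirectory() as tmp_path:
        with zipfile.ZipFile(archive_path) as zf:
            zf.extractall(tmp_path)
        for root, dirs, files in os.walk(tmp_path):
            for name in files:
                path = os.path.join(root, name)
                rel = os.path.relpath(path, tmp_path)
                items.append({
                    "path": os.path.basename(archive_path) + "/" + rel,  # display path
                    "size": os.path.getsize(path),
                    "real_path": archive_path,  # original source on disk
                })
    return items  # temp dir is removed on exit, like packer.remove_tmp()


if __name__ == "__main__":
    if os.path.exists("attachments.zip"):  # example archive name, illustrative only
        for item in collect_archive_members("attachments.zip"):
            print(item)
```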
@@ -203,6 +274,7 @@ class Attachment(QObject):
         if self.is_verbose():
             print("\nPreparing additional context...\nContext Mode: {}".format(self.mode))
 
+        self.window.core.attachments.context.reset()
         if self.mode == self.MODE_FULL_CONTEXT:
             content = self.get_full_context(ctx)
         elif self.mode == self.MODE_QUERY_CONTEXT:
@@ -210,6 +282,14 @@ class Attachment(QObject):
         elif self.mode == self.MODE_QUERY_CONTEXT_SUMMARY:
             content = self.get_context_summary(ctx)
 
+        # append used files and urls to context
+        files = self.window.core.attachments.context.get_used_files()
+        urls = self.window.core.attachments.context.get_used_urls()
+        if files:
+            ctx.files = files
+        if urls:
+            ctx.urls = urls
+
         if content:
             if self.is_verbose():
                 print("\n--- Using additional context ---\n\n{}".format(content))
@@ -362,6 +442,128 @@ class Attachment(QObject):
         self.window.core.attachments.context.clear(meta, delete_files=remove_local)
         self.update_list(meta)
 
+    def select(self, idx: int):
+        """
+        Select uploaded file
+
+        :param idx: index of file
+        """
+        pass
+
+    def open_by_idx(self, idx: int):
+        """
+        Open attachment by index
+
+        :param idx: Index on list
+        """
+        meta = self.window.core.ctx.get_current_meta()
+        if meta is None or meta.additional_ctx is None:
+            return
+        items = self.window.core.attachments.context.get_all(meta)
+        if idx < len(items):
+            item = items[idx]
+            path = item["path"]
+            if "real_path" in item:
+                path = item["real_path"]
+            if os.path.exists(path) and os.path.isfile(path):
+                print("Opening attachment: {}".format(path))
+                self.window.controller.files.open(path)
+
+    def open_dir_src_by_idx(self, idx: int):
+        """
+        Open source directory by index
+
+        :param idx: Index on list
+        """
+        meta = self.window.core.ctx.get_current_meta()
+        if meta is None or meta.additional_ctx is None:
+            return
+        items = self.window.core.attachments.context.get_all(meta)
+        if idx < len(items):
+            item = items[idx]
+            path = item["path"]
+            if "real_path" in item:
+                path = item["real_path"]
+            dir = os.path.dirname(path)
+            if os.path.exists(dir) and os.path.isdir(dir):
+                print("Opening source directory: {}".format(dir))
+                self.window.controller.files.open(dir)
+
+    def open_dir_dest_by_idx(self, idx: int):
+        """
+        Open destination directory by index
+
+        :param idx: Index on list
+        """
+        meta = self.window.core.ctx.get_current_meta()
+        if meta is None or meta.additional_ctx is None:
+            return
+        items = self.window.core.attachments.context.get_all(meta)
+        if idx < len(items):
+            item = items[idx]
+            root_dir = self.window.core.attachments.context.get_dir(meta)
+            dir = os.path.join(root_dir, item["uuid"])
+            if os.path.exists(dir) and os.path.isdir(dir):
+                self.window.controller.files.open(dir)
+                print("Opening destination directory: {}".format(dir))
+
+    def has_file_by_idx(self, idx: int) -> bool:
+        """
+        Check if has file by index
+
+        :param idx: Index on list
+        :return: True if has file
+        """
+        meta = self.window.core.ctx.get_current_meta()
+        if meta is None or meta.additional_ctx is None:
+            return False
+        items = self.window.core.attachments.context.get_all(meta)
+        if idx < len(items):
+            item = items[idx]
+            path = item["path"]
+            if "real_path" in item:
+                path = item["real_path"]
+            return os.path.exists(path) and os.path.isfile(path)
+        return False
+
+    def has_src_by_idx(self, idx: int) -> bool:
+        """
+        Check if has source directory by index
+
+        :param idx: Index on list
+        :return: True if has source directory
+        """
+        meta = self.window.core.ctx.get_current_meta()
+        if meta is None or meta.additional_ctx is None:
+            return False
+        items = self.window.core.attachments.context.get_all(meta)
+        if idx < len(items):
+            item = items[idx]
+            path = item["path"]
+            if "real_path" in item:
+                path = item["real_path"]
+            dir = os.path.dirname(path)
+            return os.path.exists(dir) and os.path.isdir(dir)
+        return False
+
+    def has_dest_by_idx(self, idx: int) -> bool:
+        """
+        Check if has destination directory by index
+
+        :param idx: Index on list
+        :return: True if has destination directory
+        """
+        meta = self.window.core.ctx.get_current_meta()
+        if meta is None or meta.additional_ctx is None:
+            return False
+        items = self.window.core.attachments.context.get_all(meta)
+        if idx < len(items):
+            item = items[idx]
+            root_dir = self.window.core.attachments.context.get_dir(meta)
+            dir = os.path.join(root_dir, item["uuid"])
+            return os.path.exists(dir) and os.path.isdir(dir)
+        return False
+
     @Slot(object)
     def handle_upload_error(self, error: Exception):
         """
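The `*_by_idx` helpers above all resolve an item's on-disk location the same way: prefer the stored `real_path` (the original source file) and fall back to `path` (the archive-relative display path). Below is a small sketch of that shared rule; `resolve_item_path` and `item_file_exists` are illustrative helpers, not a PyGPT API.

```python
# Sketch of the path-resolution rule shared by the *_by_idx helpers above:
# prefer the item's "real_path" (original location) and fall back to "path".
import os


def resolve_item_path(item: dict) -> str:
    path = item["path"]
    if "real_path" in item:
        path = item["real_path"]
    return path


def item_file_exists(item: dict) -> bool:
    path = resolve_item_path(item)
    return os.path.exists(path) and os.path.isfile(path)


if __name__ == "__main__":
    # hypothetical item record, mirroring the fields stored by upload_file()
    example = {"path": "archive.zip/readme.txt", "real_path": "/home/user/archive.zip"}
    print(resolve_item_path(example), item_file_exists(example))
```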
pygpt_net/controller/chat/audio.py (new file)

@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2024.11.26 19:00:00 #
+# ================================================== #
+
+import base64
+import os
+
+from pygpt_net.core.types import (
+    MODE_AUDIO,
+)
+from pygpt_net.core.bridge.context import MultimodalContext, BridgeContext
+from pygpt_net.core.events import KernelEvent
+from pygpt_net.item.ctx import CtxItem
+
+
+class Audio:
+    def __init__(self, window=None):
+        """
+        Chat audio controller
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.audio_file = "chat_output.wav"
+        self.tmp_input = False
+        self.tmp_output = False
+
+    def setup(self):
+        """Set up UI"""
+        pass
+
+    def update(self):
+        """Update input/output audio"""
+        mode = self.window.core.config.get("mode")
+        if mode == MODE_AUDIO:
+            if not self.window.controller.audio.is_output_enabled():
+                self.window.controller.audio.enable_output()
+                self.tmp_output = True
+            else:
+                self.tmp_output = False
+            if not self.window.controller.audio.is_input_enabled():
+                self.window.controller.audio.enable_input()
+                self.tmp_input = True
+            else:
+                self.tmp_input = False
+        else:
+            if self.tmp_output:
+                self.window.controller.audio.disable_output()
+            if self.tmp_input:
+                self.window.controller.audio.disable_input()
+
+    def handle_output(self, ctx: CtxItem):
+        """
+        Handle output audio
+
+        :param ctx: Context item
+        """
+        wav_path = os.path.join(self.window.core.config.get_user_path(), self.audio_file)
+        if ctx.is_audio and ctx.audio_output:
+            wav_bytes = base64.b64decode(ctx.audio_output)
+            with open(wav_path, "wb") as f:
+                f.write(wav_bytes)
+            self.window.controller.audio.play_chat_audio(wav_path)
+
+    def handle_input(self, path: str):
+        """
+        Handle input audio
+
+        :param path: audio file path
+        """
+        multimodal_ctx = MultimodalContext()
+        with open(path, "rb") as f:
+            multimodal_ctx.audio_data = f.read()
+        multimodal_ctx.is_audio_input = True
+
+        bridge_ctx = BridgeContext()
+        bridge_ctx.prompt = self.window.ui.nodes['input'].toPlainText()  # attach text input
+        bridge_ctx.multimodal_ctx = multimodal_ctx
+        event = KernelEvent(KernelEvent.INPUT_USER, {
+            'context': bridge_ctx,
+            'extra': {},
+        })
+        self.window.dispatch(event)
+
+    def enabled(self) -> bool:
+        """
+        Check if audio mode is enabled
+
+        :return: bool True if enabled
+        """
+        return self.window.core.config.get("mode") == MODE_AUDIO
+
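`Audio.handle_output()` above decodes the model's base64-encoded audio into a WAV file under the user directory before playback, and `handle_input()` reads raw audio bytes back to attach to a multimodal request. Below is a standalone sketch of that round-trip; the function names, paths, and placeholder bytes are illustrative only.

```python
# Sketch of the base64 round-trip used by Audio.handle_output()/handle_input().
import base64
import os


def save_audio_output(b64_audio: str, user_dir: str, filename: str = "chat_output.wav") -> str:
    """Decode base64 audio and write it into the user's data directory."""
    wav_path = os.path.join(user_dir, filename)
    with open(wav_path, "wb") as f:
        f.write(base64.b64decode(b64_audio))
    return wav_path


def load_audio_input(path: str) -> bytes:
    """Read raw audio bytes back, as done before attaching them to a request."""
    with open(path, "rb") as f:
        return f.read()


if __name__ == "__main__":
    fake_b64 = base64.b64encode(b"RIFF....WAVEfmt ").decode("ascii")  # placeholder bytes, not a real WAV
    out = save_audio_output(fake_b64, ".", "demo_output.wav")
    print(out, len(load_audio_input(out)), "bytes")
```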
pygpt_net/controller/chat/input.py

@@ -6,10 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.bridge import BridgeContext
+from pygpt_net.core.bridge.context import MultimodalContext
 from pygpt_net.core.events import Event, AppEvent, KernelEvent, RenderEvent
 from pygpt_net.core.types import (
     MODE_AGENT,

@@ -134,6 +135,7 @@ class Input:
         reply = extra.get("reply", False)
         internal = extra.get("internal", False)
         parent_id = extra.get("parent_id", None)
+        multimodal_ctx = context.multimodal_ctx
         self.execute(
             text=text,
             force=force,

@@ -141,6 +143,7 @@ class Input:
             internal=internal,
             prev_ctx=prev_ctx,
             parent_id=parent_id,
+            multimodal_ctx=multimodal_ctx,
         )
 
     def execute(

@@ -151,6 +154,7 @@ class Input:
             internal: bool = False,
             prev_ctx: CtxItem = None,
             parent_id: int = None,
+            multimodal_ctx: MultimodalContext = None,
     ):
         """
         Execute send input text to API

@@ -161,6 +165,7 @@ class Input:
         :param internal: internal call
         :param prev_ctx: previous context (if reply)
         :param parent_id: parent id (if expert)
+        :param multimodal_ctx: multimodal context
         """
         self.window.dispatch(KernelEvent(KernelEvent.STATE_IDLE, {
             "id": "chat",

@@ -208,8 +213,9 @@ class Input:
         camera_captured = (self.window.controller.ui.vision.has_vision()
                            and self.window.controller.attachment.has(mode))
 
-        # allow empty input only
[removed line 212 not preserved in this diff view]
+        # allow empty text input only if multimodal data, otherwise abort
+        is_audio = multimodal_ctx is not None and multimodal_ctx.is_audio_input
+        if len(text.strip()) == 0 and (not camera_captured and not is_audio):
             self.generating = False  # unlock as not generating
             return
 

@@ -256,6 +262,7 @@ class Input:
             internal=internal,
             prev_ctx=prev_ctx,
             parent_id=parent_id,
+            multimodal_ctx=multimodal_ctx,
         )  # text mode: OpenAI, Langchain, Llama, etc.
 
     def log(self, data: any):
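The new guard in `Input.execute()` permits an empty text prompt only when other input is attached (a camera capture or audio). Below is a minimal sketch of that decision as a pure function, using a stand-in dataclass in place of PyGPT's `MultimodalContext`.

```python
# Sketch of the send guard added to Input.execute(): empty text is allowed
# only when a camera capture or audio input is attached.
from dataclasses import dataclass


@dataclass
class FakeMultimodalContext:  # stand-in for pygpt_net's MultimodalContext
    is_audio_input: bool = False


def should_abort(text: str, camera_captured: bool, multimodal_ctx=None) -> bool:
    is_audio = multimodal_ctx is not None and multimodal_ctx.is_audio_input
    return len(text.strip()) == 0 and (not camera_captured and not is_audio)


if __name__ == "__main__":
    print(should_abort("", False, None))                         # True: nothing to send
    print(should_abort("", False, FakeMultimodalContext(True)))  # False: audio attached
    print(should_abort("hello", False, None))                    # False: text present
```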
pygpt_net/controller/chat/output.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (

@@ -111,6 +111,9 @@ class Output:
         # update response tokens
         self.window.controller.chat.common.show_response_tokens(ctx)
 
+        # handle audio output
+        self.window.controller.chat.audio.handle_output(ctx)
+
         # store to history
         if self.window.core.config.get('store_history'):
             self.window.core.history.append(ctx, "output")
pygpt_net/controller/chat/text.py

@@ -6,17 +6,18 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
+    MODE_AUDIO,
     MODE_ASSISTANT,
     MODE_LLAMA_INDEX,
 )
 from pygpt_net.core.events import Event, AppEvent, KernelEvent, RenderEvent
-from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.utils import trans
 

@@ -38,6 +39,7 @@ class Text:
             internal: bool = False,
             prev_ctx: CtxItem = None,
             parent_id: str = None,
+            multimodal_ctx: MultimodalContext = None,
     ) -> CtxItem:
         """
         Send text message

@@ -47,6 +49,7 @@ class Text:
         :param internal: internal call
         :param prev_ctx: previous context item (if reply)
         :param parent_id: parent context id
+        :param multimodal_ctx: multimodal context
         :return: context item
         """
         self.window.update_status(trans('status.sending'))

@@ -80,7 +83,7 @@ class Text:
         tools_outputs = []  # tools outputs (assistant only)
 
         # o1 models: disable stream mode
-        if model.startswith("o1") or mode
+        if model.startswith("o1") or mode in [MODE_AGENT_LLAMA, MODE_AUDIO]:
             stream_mode = False
 
         # create ctx item

@@ -103,7 +106,7 @@ class Text:
 
         # if reply from expert command
         if parent_id is not None:  # parent_id = reply from expert
-            ctx.meta =
+            # At this point, ctx.meta ID = slave META ID (parent_id is given from slave, not from master)
             ctx.sub_reply = True  # mark as sub reply
             ctx.input_name = prev_ctx.input_name
             ctx.output_name = prev_ctx.output_name

@@ -147,7 +150,8 @@ class Text:
         if mode == MODE_LLAMA_INDEX:
             # check if index is selected
             if self.window.controller.idx.index_selected():
-                disable_native_func_calls = True
+                disable_native_func_calls = True
+                # ^^^ native func calls allowed only for LLM call, not for the query engine
 
         # build final prompt (+plugins)
         sys_prompt = self.window.core.prompt.prepare_sys_prompt(

@@ -232,6 +236,7 @@ class Text:
             external_functions=functions,  # external functions
             tools_outputs=tools_outputs,  # if not empty then will submit outputs to assistant
             max_tokens=max_tokens,  # max output tokens
+            multimodal_ctx=multimodal_ctx,  # multimodal context
         )
         extra = {
             'mode': mode,
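`Text.send()` now forces streaming off for o1 models and for the agent/audio work modes. Below is a sketch of that decision as a pure function; the mode string values are assumptions standing in for the real constants in `pygpt_net.core.types`, so verify them against the source.

```python
# Sketch of the stream-mode decision added in Text.send(): streaming is turned
# off for o1 models and for the MODE_AGENT_LLAMA / MODE_AUDIO work modes.
MODE_AGENT_LLAMA = "agent_llama"  # assumed value, stand-in for pygpt_net.core.types
MODE_AUDIO = "audio"              # assumed value, stand-in for pygpt_net.core.types


def resolve_stream_mode(requested_stream: bool, model: str, mode: str) -> bool:
    stream_mode = requested_stream
    if model.startswith("o1") or mode in [MODE_AGENT_LLAMA, MODE_AUDIO]:
        stream_mode = False
    return stream_mode


if __name__ == "__main__":
    print(resolve_stream_mode(True, "gpt-4o", "chat"))      # True: streaming kept
    print(resolve_stream_mode(True, "o1-preview", "chat"))  # False: o1 model
    print(resolve_stream_mode(True, "gpt-4o", MODE_AUDIO))  # False: audio mode
```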
pygpt_net/controller/dialogs/confirm.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 02:00:00 #
 # ================================================== #
 
 class Confirm:

@@ -349,6 +349,18 @@ class Confirm:
         elif type == 'ctx.group':
             self.window.controller.ctx.create_group(name, id)
 
+    def accept_url(self, type: str, id: any, url: str):
+        """
+        Update URL provided
+
+        :param type: dialog type
+        :param id: dialog object id
+        :param url: URL
+        """
+        # add attachment
+        if type == 'attachment':
+            self.window.controller.attachment.add_url(url)
+
     def dismiss_rename(self):
         """Dismiss rename dialog"""
         self.window.ui.dialog['rename'].close()

@@ -356,3 +368,7 @@ class Confirm:
     def dismiss_create(self):
         """Dismiss create dialog"""
         self.window.ui.dialog['create'].close()
+
+    def dismiss_url(self):
+        """Dismiss url dialog"""
+        self.window.ui.dialog['url'].close()
pygpt_net/controller/kernel/reply.py

@@ -6,14 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.25 01:00:00 #
 # ================================================== #
 
 import json
 
-from pygpt_net.core.types import (
-    MODE_AGENT_LLAMA,
-)
 from pygpt_net.core.events import KernelEvent, RenderEvent
 from pygpt_net.core.bridge import BridgeContext
 from pygpt_net.item.ctx import CtxItem

@@ -31,13 +28,12 @@ class Reply:
         self.last_result = None
         self.reply_idx = -1
 
-    def add(self, context, extra
+    def add(self, context, extra) -> list:
         """
         Send reply from plugins to model
 
         :param context: bridge context
         :param extra: extra data
-        :param flush: flush reply stack
         :return: list of results
         """
         flush = False

@@ -54,8 +50,9 @@ class Reply:
             self.window.core.debug.debug("CTX REPLY: " + str(ctx))
         if ctx.reply:
             if self.reply_idx >= ctx.pid:  # skip if reply already sent for this context
+                # >>> this prevents multiple replies from the same ctx item <<<
                 return []
-            self.reply_idx = ctx.pid
+            self.reply_idx = ctx.pid
             self.append(ctx)
             if flush or self.window.controller.kernel.async_allowed(ctx):
                 self.flush()

@@ -70,7 +67,6 @@ class Reply:
         """
         self.window.core.debug.info("Reply stack (add)...")
         self.reply_stack.append(ctx.results)
-        # ctx.cmds = []  # clear commands (disables expand output in render)
         ctx.results = []  # clear results
         self.reply_ctx = ctx
 

@@ -102,6 +98,7 @@
         self.window.core.ctx.update_item(self.reply_ctx)  # update context in db
         self.window.update_status('...')
 
+        # if response from sub call, from experts
         parent_id = None
         if self.reply_ctx.sub_call:
             if self.reply_ctx.meta is not None: