pygpt-net 2.4.30__py3-none-any.whl → 2.4.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +32 -0
- README.md +2105 -1892
- pygpt_net/CHANGELOG.txt +32 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/access/__init__.py +5 -5
- pygpt_net/controller/access/control.py +3 -2
- pygpt_net/controller/attachment.py +68 -1
- pygpt_net/controller/audio/__init__.py +34 -6
- pygpt_net/controller/chat/__init__.py +3 -1
- pygpt_net/controller/chat/attachment.py +263 -38
- pygpt_net/controller/chat/audio.py +99 -0
- pygpt_net/controller/chat/input.py +10 -3
- pygpt_net/controller/chat/output.py +4 -1
- pygpt_net/controller/chat/text.py +7 -3
- pygpt_net/controller/dialogs/confirm.py +17 -1
- pygpt_net/controller/lang/custom.py +3 -1
- pygpt_net/controller/mode.py +2 -1
- pygpt_net/controller/painter/capture.py +2 -2
- pygpt_net/controller/presets/editor.py +15 -2
- pygpt_net/controller/ui/__init__.py +4 -1
- pygpt_net/core/access/voice.py +2 -2
- pygpt_net/core/agents/legacy.py +3 -1
- pygpt_net/core/attachments/__init__.py +14 -9
- pygpt_net/core/attachments/context.py +226 -44
- pygpt_net/core/{audio.py → audio/__init__.py} +1 -1
- pygpt_net/core/audio/context.py +34 -0
- pygpt_net/core/bridge/context.py +29 -1
- pygpt_net/core/ctx/__init__.py +4 -1
- pygpt_net/core/db/__init__.py +4 -2
- pygpt_net/core/debug/attachments.py +3 -1
- pygpt_net/core/debug/context.py +5 -1
- pygpt_net/core/debug/presets.py +3 -1
- pygpt_net/core/events/event.py +2 -1
- pygpt_net/core/experts/__init__.py +3 -1
- pygpt_net/core/idx/chat.py +28 -6
- pygpt_net/core/idx/indexing.py +123 -15
- pygpt_net/core/modes.py +3 -1
- pygpt_net/core/presets.py +13 -2
- pygpt_net/core/render/markdown/pid.py +2 -1
- pygpt_net/core/render/plain/pid.py +2 -1
- pygpt_net/core/render/web/body.py +34 -12
- pygpt_net/core/render/web/pid.py +2 -1
- pygpt_net/core/render/web/renderer.py +8 -3
- pygpt_net/core/tokens.py +4 -2
- pygpt_net/core/types/mode.py +2 -1
- pygpt_net/data/config/config.json +7 -5
- pygpt_net/data/config/models.json +190 -5
- pygpt_net/data/config/modes.json +11 -5
- pygpt_net/data/config/presets/current.audio.json +34 -0
- pygpt_net/data/config/settings.json +15 -1
- pygpt_net/data/css/web.css +70 -0
- pygpt_net/data/css/web.dark.css +4 -1
- pygpt_net/data/css/web.light.css +1 -1
- pygpt_net/data/locale/locale.de.ini +27 -14
- pygpt_net/data/locale/locale.en.ini +63 -47
- pygpt_net/data/locale/locale.es.ini +27 -14
- pygpt_net/data/locale/locale.fr.ini +29 -16
- pygpt_net/data/locale/locale.it.ini +27 -14
- pygpt_net/data/locale/locale.pl.ini +31 -18
- pygpt_net/data/locale/locale.uk.ini +27 -14
- pygpt_net/data/locale/locale.zh.ini +34 -21
- pygpt_net/data/locale/plugin.cmd_files.de.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.en.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.es.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.fr.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.it.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.pl.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.uk.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.zh.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_web.de.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.en.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.es.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.fr.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.it.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.pl.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.uk.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.zh.ini +5 -5
- pygpt_net/data/locale/plugin.idx_llama_index.de.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.en.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.es.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.fr.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.it.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.pl.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.uk.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.zh.ini +12 -12
- pygpt_net/data/win32/USER-LICENSE.rtf +0 -0
- pygpt_net/data/win32/banner.bmp +0 -0
- pygpt_net/data/win32/banner_welcome.bmp +0 -0
- pygpt_net/item/attachment.py +9 -1
- pygpt_net/item/ctx.py +9 -1
- pygpt_net/item/preset.py +5 -1
- pygpt_net/launcher.py +3 -1
- pygpt_net/migrations/Version20241126170000.py +28 -0
- pygpt_net/migrations/__init__.py +3 -1
- pygpt_net/plugin/audio_input/__init__.py +11 -1
- pygpt_net/plugin/audio_input/worker.py +9 -1
- pygpt_net/plugin/audio_output/__init__.py +37 -7
- pygpt_net/plugin/audio_output/worker.py +38 -41
- pygpt_net/plugin/cmd_code_interpreter/runner.py +2 -2
- pygpt_net/plugin/cmd_mouse_control/__init__.py +4 -2
- pygpt_net/plugin/openai_dalle/__init__.py +3 -1
- pygpt_net/plugin/openai_vision/__init__.py +3 -1
- pygpt_net/provider/core/attachment/json_file.py +4 -1
- pygpt_net/provider/core/config/patch.py +22 -0
- pygpt_net/provider/core/ctx/db_sqlite/storage.py +14 -4
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +19 -2
- pygpt_net/provider/core/model/patch.py +7 -1
- pygpt_net/provider/core/preset/json_file.py +5 -1
- pygpt_net/provider/gpt/__init__.py +14 -2
- pygpt_net/provider/gpt/audio.py +63 -0
- pygpt_net/provider/gpt/chat.py +76 -44
- pygpt_net/provider/gpt/utils.py +27 -0
- pygpt_net/provider/gpt/vision.py +37 -15
- pygpt_net/provider/loaders/base.py +10 -1
- pygpt_net/provider/loaders/web_yt.py +19 -1
- pygpt_net/tools/image_viewer/ui/dialogs.py +3 -1
- pygpt_net/ui/dialog/about.py +1 -1
- pygpt_net/ui/dialog/preset.py +3 -1
- pygpt_net/ui/dialog/url.py +29 -0
- pygpt_net/ui/dialogs.py +5 -1
- pygpt_net/ui/layout/chat/attachments.py +42 -6
- pygpt_net/ui/layout/chat/attachments_ctx.py +14 -4
- pygpt_net/ui/layout/chat/attachments_uploaded.py +8 -4
- pygpt_net/ui/widget/anims/toggles.py +2 -2
- pygpt_net/ui/widget/dialog/url.py +59 -0
- pygpt_net/ui/widget/lists/attachment.py +22 -17
- pygpt_net/ui/widget/lists/attachment_ctx.py +65 -3
- pygpt_net/ui/widget/option/checkbox.py +1 -3
- pygpt_net/ui/widget/option/toggle.py +1 -0
- pygpt_net/ui/widget/textarea/url.py +43 -0
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.35.dist-info}/METADATA +2107 -1894
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.35.dist-info}/RECORD +135 -124
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.35.dist-info}/LICENSE +0 -0
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.35.dist-info}/WHEEL +0 -0
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.35.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/chat/audio.py
ADDED
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2024.11.26 19:00:00 #
+# ================================================== #
+
+import base64
+import os
+
+from pygpt_net.core.types import (
+    MODE_AUDIO,
+)
+from pygpt_net.core.bridge.context import MultimodalContext, BridgeContext
+from pygpt_net.core.events import KernelEvent
+from pygpt_net.item.ctx import CtxItem
+
+
+class Audio:
+    def __init__(self, window=None):
+        """
+        Chat audio controller
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.audio_file = "chat_output.wav"
+        self.tmp_input = False
+        self.tmp_output = False
+
+    def setup(self):
+        """Set up UI"""
+        pass
+
+    def update(self):
+        """Update input/output audio"""
+        mode = self.window.core.config.get("mode")
+        if mode == MODE_AUDIO:
+            if not self.window.controller.audio.is_output_enabled():
+                self.window.controller.audio.enable_output()
+                self.tmp_output = True
+            else:
+                self.tmp_output = False
+            if not self.window.controller.audio.is_input_enabled():
+                self.window.controller.audio.enable_input()
+                self.tmp_input = True
+            else:
+                self.tmp_input = False
+        else:
+            if self.tmp_output:
+                self.window.controller.audio.disable_output()
+            if self.tmp_input:
+                self.window.controller.audio.disable_input()
+
+    def handle_output(self, ctx: CtxItem):
+        """
+        Handle output audio
+
+        :param ctx: Context item
+        """
+        wav_path = os.path.join(self.window.core.config.get_user_path(), self.audio_file)
+        if ctx.is_audio and ctx.audio_output:
+            wav_bytes = base64.b64decode(ctx.audio_output)
+            with open(wav_path, "wb") as f:
+                f.write(wav_bytes)
+            self.window.controller.audio.play_chat_audio(wav_path)
+
+    def handle_input(self, path: str):
+        """
+        Handle input audio
+
+        :param path: audio file path
+        """
+        multimodal_ctx = MultimodalContext()
+        with open(path, "rb") as f:
+            multimodal_ctx.audio_data = f.read()
+        multimodal_ctx.is_audio_input = True
+
+        bridge_ctx = BridgeContext()
+        bridge_ctx.prompt = self.window.ui.nodes['input'].toPlainText()  # attach text input
+        bridge_ctx.multimodal_ctx = multimodal_ctx
+        event = KernelEvent(KernelEvent.INPUT_USER, {
+            'context': bridge_ctx,
+            'extra': {},
+        })
+        self.window.dispatch(event)
+
+    def enabled(self) -> bool:
+        """
+        Check if audio mode is enabled
+
+        :return: bool True if enabled
+        """
+        return self.window.core.config.get("mode") == MODE_AUDIO
+
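For orientation while reading the hunks that follow: handle_input() above is the entry point that carries recorded audio into the kernel, and controller/chat/input.py and controller/chat/text.py below are extended to pass that multimodal context down to the provider. The sketch below is illustrative only (the window object is a stand-in for the running application window; it is not code from the package) and mirrors what handle_input() does:

    from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
    from pygpt_net.core.events import KernelEvent

    def send_recorded_audio(window, wav_path: str, prompt: str = ""):
        # load the recorded audio and mark it as audio input
        multimodal_ctx = MultimodalContext()
        with open(wav_path, "rb") as f:
            multimodal_ctx.audio_data = f.read()
        multimodal_ctx.is_audio_input = True

        # wrap it together with any typed text in a bridge context
        bridge_ctx = BridgeContext()
        bridge_ctx.prompt = prompt
        bridge_ctx.multimodal_ctx = multimodal_ctx

        # INPUT_USER enters the same pipeline as typed input; Input.execute()
        # now accepts multimodal_ctx and forwards it (see hunks below)
        window.dispatch(KernelEvent(KernelEvent.INPUT_USER, {
            'context': bridge_ctx,
            'extra': {},
        }))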
pygpt_net/controller/chat/input.py
CHANGED
@@ -6,10 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.bridge import BridgeContext
+from pygpt_net.core.bridge.context import MultimodalContext
 from pygpt_net.core.events import Event, AppEvent, KernelEvent, RenderEvent
 from pygpt_net.core.types import (
     MODE_AGENT,
@@ -134,6 +135,7 @@ class Input:
         reply = extra.get("reply", False)
         internal = extra.get("internal", False)
         parent_id = extra.get("parent_id", None)
+        multimodal_ctx = context.multimodal_ctx
         self.execute(
             text=text,
             force=force,
@@ -141,6 +143,7 @@ class Input:
             internal=internal,
             prev_ctx=prev_ctx,
             parent_id=parent_id,
+            multimodal_ctx=multimodal_ctx,
         )
 
     def execute(
@@ -151,6 +154,7 @@ class Input:
             internal: bool = False,
             prev_ctx: CtxItem = None,
             parent_id: int = None,
+            multimodal_ctx: MultimodalContext = None,
     ):
         """
         Execute send input text to API
@@ -161,6 +165,7 @@ class Input:
         :param internal: internal call
         :param prev_ctx: previous context (if reply)
         :param parent_id: parent id (if expert)
+        :param multimodal_ctx: multimodal context
         """
         self.window.dispatch(KernelEvent(KernelEvent.STATE_IDLE, {
             "id": "chat",
@@ -208,8 +213,9 @@ class Input:
         camera_captured = (self.window.controller.ui.vision.has_vision()
                            and self.window.controller.attachment.has(mode))
 
-        # allow empty input only
-
+        # allow empty text input only if multimodal data, otherwise abort
+        is_audio = multimodal_ctx is not None and multimodal_ctx.is_audio_input
+        if len(text.strip()) == 0 and (not camera_captured and not is_audio):
             self.generating = False  # unlock as not generating
             return
 
@@ -256,6 +262,7 @@ class Input:
             internal=internal,
             prev_ctx=prev_ctx,
             parent_id=parent_id,
+            multimodal_ctx=multimodal_ctx,
         )  # text mode: OpenAI, Langchain, Llama, etc.
 
     def log(self, data: any):
pygpt_net/controller/chat/output.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
@@ -111,6 +111,9 @@ class Output:
         # update response tokens
         self.window.controller.chat.common.show_response_tokens(ctx)
 
+        # handle audio output
+        self.window.controller.chat.audio.handle_output(ctx)
+
         # store to history
         if self.window.core.config.get('store_history'):
             self.window.core.history.append(ctx, "output")
pygpt_net/controller/chat/text.py
CHANGED
@@ -6,17 +6,18 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
+    MODE_AUDIO,
     MODE_ASSISTANT,
     MODE_LLAMA_INDEX,
 )
 from pygpt_net.core.events import Event, AppEvent, KernelEvent, RenderEvent
-from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.utils import trans
 
@@ -38,6 +39,7 @@ class Text:
             internal: bool = False,
             prev_ctx: CtxItem = None,
             parent_id: str = None,
+            multimodal_ctx: MultimodalContext = None,
     ) -> CtxItem:
         """
         Send text message
@@ -47,6 +49,7 @@ class Text:
         :param internal: internal call
         :param prev_ctx: previous context item (if reply)
         :param parent_id: parent context id
+        :param multimodal_ctx: multimodal context
         :return: context item
         """
         self.window.update_status(trans('status.sending'))
@@ -80,7 +83,7 @@ class Text:
         tools_outputs = []  # tools outputs (assistant only)
 
         # o1 models: disable stream mode
-        if model.startswith("o1") or mode
+        if model.startswith("o1") or mode in [MODE_AGENT_LLAMA, MODE_AUDIO]:
             stream_mode = False
 
         # create ctx item
@@ -233,6 +236,7 @@ class Text:
             external_functions=functions,  # external functions
             tools_outputs=tools_outputs,  # if not empty then will submit outputs to assistant
             max_tokens=max_tokens,  # max output tokens
+            multimodal_ctx=multimodal_ctx,  # multimodal context
         )
         extra = {
             'mode': mode,
pygpt_net/controller/dialogs/confirm.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 02:00:00 #
 # ================================================== #
 
 class Confirm:
@@ -349,6 +349,18 @@ class Confirm:
         elif type == 'ctx.group':
             self.window.controller.ctx.create_group(name, id)
 
+    def accept_url(self, type: str, id: any, url: str):
+        """
+        Update URL provided
+
+        :param type: dialog type
+        :param id: dialog object id
+        :param url: URL
+        """
+        # add attachment
+        if type == 'attachment':
+            self.window.controller.attachment.add_url(url)
+
     def dismiss_rename(self):
         """Dismiss rename dialog"""
         self.window.ui.dialog['rename'].close()
@@ -356,3 +368,7 @@ class Confirm:
     def dismiss_create(self):
         """Dismiss create dialog"""
         self.window.ui.dialog['create'].close()
+
+    def dismiss_url(self):
+        """Dismiss url dialog"""
+        self.window.ui.dialog['url'].close()
pygpt_net/controller/lang/custom.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import Qt
@@ -14,6 +14,7 @@ from PySide6.QtCore import Qt
 from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
@@ -63,6 +64,7 @@ class Custom:
         self.window.ui.config['preset'][MODE_AGENT].box.setText(trans("preset.agent"))
         self.window.ui.config['preset'][MODE_AGENT_LLAMA].box.setText(trans("preset.agent_llama"))
         self.window.ui.config['preset'][MODE_EXPERT].box.setText(trans("preset.expert"))
+        self.window.ui.config['preset'][MODE_AUDIO].box.setText(trans("preset.audio"))
 
         self.window.ui.config['global']['img_raw'].setText(trans("img.raw"))
 
pygpt_net/controller/mode.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.events import Event, AppEvent
@@ -41,6 +41,7 @@ class Mode:
         self.window.dispatch(event)
         self.window.controller.attachment.update()
         self.window.controller.chat.attachment.update()
+        self.window.controller.chat.audio.update()
         self.window.dispatch(AppEvent(AppEvent.MODE_SELECTED))  # app event
 
     def set(self, mode: str):
pygpt_net/controller/painter/capture.py
CHANGED
@@ -85,7 +85,7 @@ class Capture:
 
         # clear attachments before capture if needed
         if self.window.controller.attachment.is_capture_clear():
-            self.window.controller.attachment.clear(True, auto=True)
+            self.window.controller.attachment.clear(True, auto=True, force=True)
 
         try:
             # prepare filename
@@ -127,7 +127,7 @@
 
         # clear attachments before capture if needed
        if self.window.controller.attachment.is_capture_clear():
-            self.window.controller.attachment.clear(True, auto=True)
+            self.window.controller.attachment.clear(True, auto=True, force=True)
 
         try:
             # prepare filename
pygpt_net/controller/presets/editor.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 import datetime
@@ -16,12 +16,14 @@ from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
-    MODE_VISION,
+    MODE_VISION,
+    MODE_IMAGE,
 )
 from pygpt_net.item.preset import PresetItem
 from pygpt_net.utils import trans
@@ -87,6 +89,10 @@ class Editor:
                 "type": "bool",
                 "label": "preset.agent",
             },
+            MODE_AUDIO: {
+                "type": "bool",
+                "label": "preset.audio",
+            },
             # "assistant": {
             #     "type": "bool",
             #     "label": "preset.assistant",
@@ -287,6 +293,8 @@ class Editor:
             data.agent = True
         elif mode == MODE_AGENT_LLAMA:
             data.agent_llama = True
+        elif mode == MODE_AUDIO:
+            data.audio = True
 
         options = {}
         data_dict = data.to_dict()
@@ -322,6 +330,10 @@ class Editor:
             self.window.ui.config[self.id]['tool.function'].model.updateData([])
 
         # set focus to name field
+        current_model = self.window.core.config.get('model')
+        # set current model in combo box as selected
+        if id is None:
+            self.window.ui.config[self.id]['model'].set_value(current_model)
         self.window.ui.config[self.id]['name'].setFocus()
         self.show_hide_by_mode()
 
@@ -347,6 +359,7 @@ class Editor:
             MODE_LLAMA_INDEX,
             MODE_EXPERT,
             MODE_AGENT_LLAMA,
+            MODE_AUDIO,
         ]
 
         # disallow editing default preset
pygpt_net/controller/ui/__init__.py
CHANGED
@@ -102,6 +102,8 @@ class UI:
         prompt = str(self.window.ui.nodes['input'].toPlainText().strip())
         input_tokens, system_tokens, extra_tokens, ctx_tokens, ctx_len, ctx_len_all, \
             sum_tokens, max_current, threshold = self.window.core.tokens.get_current(prompt)
+        attachments_tokens = self.window.controller.chat.attachment.get_current_tokens()
+        sum_tokens += attachments_tokens
 
         # ctx tokens
         ctx_string = "{} / {} - {} {}".format(
@@ -119,11 +121,12 @@ class UI:
         parsed_max_current = str(int(max_current))
         parsed_max_current = parsed_max_current.replace("000000", "M").replace("000", "k")
 
-        input_string = "{} + {} + {} + {} = {} / {}".format(
+        input_string = "{} + {} + {} + {} + {} = {} / {}".format(
             input_tokens,
             system_tokens,
             ctx_tokens,
             extra_tokens,
+            attachments_tokens,
             parsed_sum,
             parsed_max_current
         )
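The token counter in the input bar therefore gains a fifth term for attachment tokens. With purely illustrative numbers (none taken from the diff), the label is assembled like this:

    # Illustrative values only; the real ones come from tokens.get_current()
    # and controller.chat.attachment.get_current_tokens().
    input_tokens, system_tokens, ctx_tokens, extra_tokens, attachments_tokens = 12, 84, 1500, 0, 230
    parsed_sum = str(input_tokens + system_tokens + ctx_tokens + extra_tokens + attachments_tokens)
    input_string = "{} + {} + {} + {} + {} = {} / {}".format(
        input_tokens, system_tokens, ctx_tokens, extra_tokens, attachments_tokens,
        parsed_sum, "128k",
    )
    print(input_string)  # 12 + 84 + 1500 + 0 + 230 = 1826 / 128k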
pygpt_net/core/access/voice.py
CHANGED
@@ -57,8 +57,8 @@ class Voice:
         ControlEvent.CTX_SEARCH_CLEAR: "Clear the search results",
         ControlEvent.INPUT_SEND: "Send the message to input",
         ControlEvent.INPUT_APPEND: "Append message to current input without sending it",
-        ControlEvent.MODE_CHAT: "Switch to
-        ControlEvent.MODE_LLAMA_INDEX: "Switch to
+        ControlEvent.MODE_CHAT: "Switch to Chat mode",
+        ControlEvent.MODE_LLAMA_INDEX: "Switch to Chat with Files (LlamaIndex) mode",
         ControlEvent.MODE_NEXT: "Switch to the next mode",
         ControlEvent.MODE_PREV: "Switch to the previous mode",
         ControlEvent.MODEL_NEXT: "Switch to the next model",
pygpt_net/core/agents/legacy.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
@@ -15,6 +15,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_AUDIO,
 )
 
 class Legacy:
@@ -31,6 +32,7 @@ class Legacy:
             MODE_VISION,
             MODE_LANGCHAIN,
             MODE_LLAMA_INDEX,
+            MODE_AUDIO,
         ]
 
     def get_allowed_modes(self) -> list:
pygpt_net/core/attachments/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 02:00:00 #
 # ================================================== #
 
 import copy
@@ -176,7 +176,8 @@ class Attachments:
             mode: str,
             name: str = None,
             path: str = None,
-            auto_save: bool = True
+            auto_save: bool = True,
+            type: str = AttachmentItem.TYPE_FILE,
     ) -> AttachmentItem:
         """
         Create new attachment
@@ -185,16 +186,19 @@ class Attachments:
         :param name: name
         :param path: path
         :param auto_save: auto_save
+        :param type: type
         :return: AttachmentItem
         """
         # make local copy of external attachment if enabled
-        if
-        if
-
+        if type == AttachmentItem.TYPE_FILE and path is not None:
+            if self.window.core.config.get("upload.store"):
+                if not self.window.core.filesystem.in_work_dir(path):
+                    path = self.window.core.filesystem.store_upload(path)
 
         attachment = self.create()
         attachment.name = name
         attachment.path = path
+        attachment.type = type
 
         if mode not in self.items:
             self.items[mode] = {}
@@ -276,19 +280,20 @@ class Attachments:
             del self.items[mode][id]
         self.save()
 
-    def delete_all(self, mode: str, remove_local: bool = False, auto: bool = False):
+    def delete_all(self, mode: str, remove_local: bool = False, auto: bool = False, force: bool = False):
         """
         Delete all attachments
 
         :param mode: mode
         :param remove_local: remove local copy
         :param auto: auto delete
+        :param force: force delete
         """
         if mode not in self.items:
             self.items[mode] = {}
 
         for id in list(self.items[mode].keys()):
-            if not self.items[mode][id].consumed and auto:
+            if (not self.items[mode][id].consumed and auto) and not force:
                 continue
             if remove_local:
                 self.window.core.filesystem.remove_upload(
@@ -410,7 +415,7 @@ class Attachments:
         for mode in self.items:
             for id in self.items[mode]:
                 attachment = self.items[mode][id]
-                if attachment.path is not None:
+                if attachment.path is not None and attachment.type == AttachmentItem.TYPE_FILE:
                     attachment.path = self.window.core.filesystem.to_workdir(
                         attachment.path,
                     )
@@ -422,7 +427,7 @@ class Attachments:
         for mode in data:
             for id in data[mode]:
                 attachment = data[mode][id]
-                if attachment.path is not None:
+                if attachment.path is not None and attachment.type == AttachmentItem.TYPE_FILE:
                     attachment.path = self.window.core.filesystem.make_local(
                         attachment.path,
                     )