pygpt-net 2.4.30__py3-none-any.whl → 2.4.34__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. The information is provided for informational purposes only and reflects the changes between the two published versions.
- CHANGELOG.md +24 -0
- README.md +46 -5
- pygpt_net/CHANGELOG.txt +24 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/access/__init__.py +5 -5
- pygpt_net/controller/access/control.py +3 -2
- pygpt_net/controller/attachment.py +67 -1
- pygpt_net/controller/audio/__init__.py +34 -6
- pygpt_net/controller/chat/__init__.py +3 -1
- pygpt_net/controller/chat/attachment.py +239 -37
- pygpt_net/controller/chat/audio.py +99 -0
- pygpt_net/controller/chat/input.py +10 -3
- pygpt_net/controller/chat/output.py +4 -1
- pygpt_net/controller/chat/text.py +7 -3
- pygpt_net/controller/dialogs/confirm.py +17 -1
- pygpt_net/controller/lang/custom.py +3 -1
- pygpt_net/controller/mode.py +2 -1
- pygpt_net/controller/presets/editor.py +11 -2
- pygpt_net/core/access/voice.py +2 -2
- pygpt_net/core/agents/legacy.py +3 -1
- pygpt_net/core/attachments/__init__.py +11 -7
- pygpt_net/core/attachments/context.py +226 -44
- pygpt_net/core/{audio.py → audio/__init__.py} +1 -1
- pygpt_net/core/audio/context.py +34 -0
- pygpt_net/core/bridge/context.py +29 -1
- pygpt_net/core/ctx/__init__.py +4 -1
- pygpt_net/core/db/__init__.py +4 -2
- pygpt_net/core/debug/attachments.py +3 -1
- pygpt_net/core/debug/context.py +5 -1
- pygpt_net/core/debug/presets.py +3 -1
- pygpt_net/core/events/event.py +2 -1
- pygpt_net/core/experts/__init__.py +3 -1
- pygpt_net/core/idx/chat.py +28 -6
- pygpt_net/core/idx/indexing.py +123 -15
- pygpt_net/core/modes.py +3 -1
- pygpt_net/core/presets.py +13 -2
- pygpt_net/core/render/markdown/pid.py +2 -1
- pygpt_net/core/render/plain/pid.py +2 -1
- pygpt_net/core/render/web/body.py +34 -12
- pygpt_net/core/render/web/pid.py +2 -1
- pygpt_net/core/render/web/renderer.py +8 -3
- pygpt_net/core/tokens.py +4 -2
- pygpt_net/core/types/mode.py +2 -1
- pygpt_net/data/config/config.json +5 -3
- pygpt_net/data/config/models.json +190 -5
- pygpt_net/data/config/modes.json +11 -5
- pygpt_net/data/config/presets/current.audio.json +34 -0
- pygpt_net/data/config/settings.json +15 -1
- pygpt_net/data/css/web.css +70 -0
- pygpt_net/data/css/web.dark.css +4 -1
- pygpt_net/data/css/web.light.css +1 -1
- pygpt_net/data/locale/locale.de.ini +26 -13
- pygpt_net/data/locale/locale.en.ini +61 -46
- pygpt_net/data/locale/locale.es.ini +26 -13
- pygpt_net/data/locale/locale.fr.ini +28 -15
- pygpt_net/data/locale/locale.it.ini +26 -13
- pygpt_net/data/locale/locale.pl.ini +30 -17
- pygpt_net/data/locale/locale.uk.ini +26 -13
- pygpt_net/data/locale/locale.zh.ini +33 -20
- pygpt_net/data/locale/plugin.cmd_files.de.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.en.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.es.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.fr.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.it.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.pl.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.uk.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_files.zh.ini +4 -4
- pygpt_net/data/locale/plugin.cmd_web.de.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.en.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.es.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.fr.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.it.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.pl.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.uk.ini +5 -5
- pygpt_net/data/locale/plugin.cmd_web.zh.ini +5 -5
- pygpt_net/data/locale/plugin.idx_llama_index.de.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.en.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.es.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.fr.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.it.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.pl.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.uk.ini +12 -12
- pygpt_net/data/locale/plugin.idx_llama_index.zh.ini +12 -12
- pygpt_net/item/attachment.py +9 -1
- pygpt_net/item/ctx.py +9 -1
- pygpt_net/item/preset.py +5 -1
- pygpt_net/launcher.py +3 -1
- pygpt_net/migrations/Version20241126170000.py +28 -0
- pygpt_net/migrations/__init__.py +3 -1
- pygpt_net/plugin/audio_input/__init__.py +11 -1
- pygpt_net/plugin/audio_input/worker.py +9 -1
- pygpt_net/plugin/audio_output/__init__.py +37 -7
- pygpt_net/plugin/audio_output/worker.py +38 -41
- pygpt_net/plugin/cmd_code_interpreter/runner.py +2 -2
- pygpt_net/plugin/cmd_mouse_control/__init__.py +4 -2
- pygpt_net/plugin/openai_dalle/__init__.py +3 -1
- pygpt_net/plugin/openai_vision/__init__.py +3 -1
- pygpt_net/provider/core/attachment/json_file.py +4 -1
- pygpt_net/provider/core/config/patch.py +16 -0
- pygpt_net/provider/core/ctx/db_sqlite/storage.py +14 -4
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +19 -2
- pygpt_net/provider/core/model/patch.py +7 -1
- pygpt_net/provider/core/preset/json_file.py +5 -1
- pygpt_net/provider/gpt/__init__.py +14 -2
- pygpt_net/provider/gpt/audio.py +63 -0
- pygpt_net/provider/gpt/chat.py +76 -44
- pygpt_net/provider/gpt/utils.py +27 -0
- pygpt_net/provider/gpt/vision.py +37 -15
- pygpt_net/provider/loaders/base.py +10 -1
- pygpt_net/provider/loaders/web_yt.py +19 -1
- pygpt_net/tools/image_viewer/ui/dialogs.py +3 -1
- pygpt_net/ui/dialog/preset.py +3 -1
- pygpt_net/ui/dialog/url.py +29 -0
- pygpt_net/ui/dialogs.py +5 -1
- pygpt_net/ui/layout/chat/attachments.py +42 -6
- pygpt_net/ui/layout/chat/attachments_ctx.py +14 -4
- pygpt_net/ui/layout/chat/attachments_uploaded.py +8 -4
- pygpt_net/ui/widget/dialog/url.py +59 -0
- pygpt_net/ui/widget/lists/attachment.py +22 -17
- pygpt_net/ui/widget/lists/attachment_ctx.py +65 -3
- pygpt_net/ui/widget/textarea/url.py +43 -0
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.34.dist-info}/METADATA +48 -7
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.34.dist-info}/RECORD +126 -117
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.34.dist-info}/LICENSE +0 -0
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.34.dist-info}/WHEEL +0 -0
- {pygpt_net-2.4.30.dist-info → pygpt_net-2.4.34.dist-info}/entry_points.txt +0 -0
pygpt_net/core/debug/presets.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #

 import os
@@ -15,6 +15,7 @@ from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
@@ -64,6 +65,7 @@ class PresetsDebug:
             MODE_AGENT: preset.agent,
             MODE_AGENT_LLAMA: preset.agent_llama,
             MODE_EXPERT: preset.expert,
+            MODE_AUDIO: preset.audio,
             'temperature': preset.temperature,
             'version': preset.version,
         }
pygpt_net/core/events/event.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #

 import json
@@ -27,6 +27,7 @@ class Event(BaseEvent):
     AUDIO_INPUT_TOGGLE = "audio.input.toggle"
     AUDIO_OUTPUT_STOP = "audio.output.stop"
     AUDIO_OUTPUT_TOGGLE = "audio.output.toggle"
+    AUDIO_PLAYBACK = "audio.playback"
    AUDIO_READ_TEXT = "audio.read_text"
    CMD_EXECUTE = "cmd.execute"
    CMD_INLINE = "cmd.inline"
pygpt_net/core/experts/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #


@@ -18,6 +18,7 @@ from pygpt_net.core.types import (
     MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
+    MODE_AUDIO,
 )
 from pygpt_net.core.bridge.context import BridgeContext
 from pygpt_net.core.events import Event, KernelEvent, RenderEvent
@@ -39,6 +40,7 @@ class Experts:
             MODE_VISION,
             MODE_LANGCHAIN,
             MODE_LLAMA_INDEX,
+            MODE_AUDIO,
         ]
         self.allowed_cmds = ["expert_call"]

pygpt_net/core/idx/chat.py
CHANGED
@@ -419,12 +419,8 @@ class Chat:
         model = self.window.core.models.from_defaults()
         service_context = self.window.core.idx.llm.get_service_context(model=model)
         index = self.storage.get_ctx_idx(path, service_context=service_context)
-
-
-            llm=llm,
-            streaming=False,
-        ).query(query)
-        """
+
+        # 1. try to retrieve directly from index
         retriever = index.as_retriever()
         nodes = retriever.retrieve(query)
         response = ""
@@ -435,6 +431,32 @@ class Chat:
         output = ""
         if response:
             output = str(response)
+        else:
+            # 2. try with prepared prompt
+            prompt = """
+            # Task
+            Translate the below user prompt into a suitable, short query for the RAG engine, so it can fetch the context
+            related to the query from the vector database.
+
+            # Important rules
+            1. Edit the user prompt in a way that allows for the best possible result.
+            2. In your response, give me only the reworded query, without any additional information from yourself.
+
+            # User prompt:
+            ```{prompt}```
+            """.format(prompt=query)
+            response_prepare = index.as_query_engine(
+                llm=service_context.llm,
+                streaming=False,
+            ).query(prompt)
+            if response_prepare:
+                # try the final query with prepared prompt
+                final_response = index.as_query_engine(
+                    llm=service_context.llm,
+                    streaming=False,
+                ).query(response_prepare.response)
+                if final_response:
+                    output = str(final_response.response)
         return output

     def query_retrieval(
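The chat.py change above replaces a leftover commented-out query with a two-step retrieval fallback: first pull nodes straight from the context index, and only if nothing comes back, ask the LLM to reword the prompt into a short RAG query and run the query engine again with that rewording. Below is a minimal standalone sketch of the same flow; it assumes a recent llama-index install, and `index`, `llm`, `REWRITE_TEMPLATE`, and `query_with_fallback` are illustrative placeholders rather than pygpt-net API.

```python
from llama_index.core import VectorStoreIndex  # assumption: llama-index >= 0.10 is installed

# Illustrative rewrite instruction, shortened from the prompt used in the hunk above.
REWRITE_TEMPLATE = (
    "Translate the user prompt below into a short query for the RAG engine, "
    "so it can fetch related context from the vector database. "
    "Reply with the reworded query only.\n\nUser prompt:\n{prompt}"
)


def query_with_fallback(index: VectorStoreIndex, llm, query: str) -> str:
    """Direct retrieval first; if it yields nothing, rewrite the prompt via the LLM and re-query."""
    # 1. try to retrieve matching nodes directly from the index
    nodes = index.as_retriever().retrieve(query)
    if nodes:
        return "\n".join(n.node.get_content() for n in nodes)

    # 2. ask the query engine to reword the prompt, then run the final query with the rewording
    engine = index.as_query_engine(llm=llm, streaming=False)
    rewritten = engine.query(REWRITE_TEMPLATE.format(prompt=query))
    if rewritten and rewritten.response:
        final = engine.query(rewritten.response)
        if final and final.response:
            return str(final.response)
    return ""
```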
pygpt_net/core/idx/indexing.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #

 import datetime
@@ -349,6 +349,65 @@ class Indexing:
             data.append(doc.text)
         return "\n".join(data)

+    def read_web_content(
+            self,
+            url: str,
+            type: str = "webpage",
+            extra_args: dict = None
+    ) -> str:
+        """
+        Get content from external resource
+
+        :param url: external url to index
+        :param type: type of URL (webpage, feed, etc.)
+        :param extra_args: extra arguments for loader
+        :return: file content
+        """
+        docs = self.read_web(url, type, extra_args)
+        data = []
+        for doc in docs:
+            data.append(doc.text)
+        return "\n".join(data)
+
+    def read_web(
+            self,
+            url: str,
+            type: str = "webpage",
+            extra_args: dict = None,
+    ) -> list[Document]:
+        """
+        Read data from external resource
+
+        :param url: external url to index
+        :param type: type of URL (webpage, feed, etc.)
+        :param extra_args: extra arguments for loader
+        :return: list of documents
+        """
+        documents = []
+
+        # check if web loader for defined type exists
+        if type not in self.loaders["web"]:
+            raise ValueError("No web loader for type: {}".format(type))
+
+        try:
+            if "url" not in extra_args:
+                extra_args["url"] = url
+
+            # get unique external content identifier
+            unique_id = self.data_providers[type].get_external_id(extra_args)
+            self.window.core.idx.log("Loading web documents from: {}".format(unique_id))
+            self.window.core.idx.log("Using web loader for type: {}".format(type))
+
+            args = self.data_providers[type].prepare_args(**extra_args)
+
+            # get documents from external resource
+            documents = self.loaders["web"][type].load_data(
+                **args
+            )
+        except Exception as e:
+            self.window.core.debug.log(e)
+        return documents
+
     def prepare_document(self, doc: Document):
         """
         Prepare document to store
@@ -924,7 +983,8 @@ class Indexing:
             self,
             file_path: str,
             index_path: str,
-            model: ModelItem = None
+            model: ModelItem = None,
+            documents: list = None,
     ) -> list:
         """
         Index context attachment
@@ -932,6 +992,45 @@ class Indexing:
         :param file_path: path to file to index
         :param index_path: index path
         :param model: model
+        :param documents: list of documents (optional)
+        :return: response
+        """
+        if model is None:
+            model = self.window.core.models.from_defaults()
+
+        service_context = self.window.core.idx.llm.get_service_context(model=model)
+        index = self.window.core.idx.storage.get_ctx_idx(index_path, service_context=service_context)  # get or create ctx index
+
+        idx = "tmp:{}".format(index_path)  # tmp index id
+        self.window.core.idx.log("Indexing to context attachment index: {}...".format(idx))
+
+        doc_ids = []
+        if documents is None:
+            documents = self.get_documents(file_path)
+        for d in documents:
+            if self.is_stopped():  # force stop
+                break
+            self.prepare_document(d)
+            self.index_document(index, d)
+            doc_ids.append(d.id_)  # add to index
+
+        self.window.core.idx.storage.store_ctx_idx(index_path, index)
+        return doc_ids
+
+    def index_attachment_web(
+            self,
+            url: str,
+            index_path: str,
+            model: ModelItem = None,
+            documents: list = None,
+    ) -> list:
+        """
+        Index context attachment
+
+        :param url: URL to index
+        :param index_path: index path
+        :param model: model
+        :param documents: list of documents (optional)
         :return: response
         """
         if model is None:
@@ -943,8 +1042,14 @@ class Indexing:
         idx = "tmp:{}".format(index_path)  # tmp index id
         self.window.core.idx.log("Indexing to context attachment index: {}...".format(idx))

+        web_type = self.get_webtype(url)
         doc_ids = []
-        documents
+        if documents is None:
+            documents = self.read_web(
+                url=url,
+                type=web_type,
+                extra_args={},
+            )
         for d in documents:
             if self.is_stopped():  # force stop
                 break
@@ -955,20 +1060,23 @@ class Indexing:
         self.window.core.idx.storage.store_ctx_idx(index_path, index)
         return doc_ids

+    def get_webtype(self, url: str) -> str:
         """
-
-
-
-
-        response = index.as_query_engine(
-            llm=llm,
-            streaming=False,
-        ).query(query)  # query with default prompt
-        if response:
-            ctx.add_doc_meta(self.get_metadata(response.source_nodes))  # store metadata
-            output = response.
-        return output
+        Get web loader type by URL
+
+        :param url: URL
+        :return: web loader type
         """
+        type = "webpage"  # default
+        for id in self.data_providers:
+            loader = self.data_providers[id]
+            if hasattr(loader, "is_supported_attachment"):
+                if loader.is_supported_attachment(url):
+                    type = id
+                    break
+        print("Selected web data loader: {}".format(type))
+        return type
+
     def remove_attachment(self, index_path: str, doc_id: str) -> bool:
         """
         Remove document from index
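The new `read_web()` / `get_webtype()` pair dispatches a URL to whichever registered data provider claims it via an `is_supported_attachment()` hook, falling back to the generic webpage loader. Below is a self-contained sketch of that selection pattern; the provider classes and loader ids are stand-ins, not the actual loaders from `pygpt_net/provider/loaders/`.

```python
from urllib.parse import urlparse


class WebpageLoader:
    """Generic fallback loader (stand-in)."""


class YouTubeLoader:
    """Stand-in for a specialized loader that can claim certain URLs."""

    @staticmethod
    def is_supported_attachment(url: str) -> bool:
        host = urlparse(url).netloc.lower()
        return host.endswith("youtube.com") or host.endswith("youtu.be")


# hypothetical registry; real ids/providers live in the Indexing class
DATA_PROVIDERS = {
    "webpage": WebpageLoader(),
    "web_yt": YouTubeLoader(),
}


def get_webtype(url: str) -> str:
    """Pick the web loader type for a URL; default to 'webpage'."""
    web_type = "webpage"
    for loader_id, loader in DATA_PROVIDERS.items():
        # first provider that claims the URL wins
        if hasattr(loader, "is_supported_attachment") and loader.is_supported_attachment(url):
            web_type = loader_id
            break
    return web_type


if __name__ == "__main__":
    print(get_webtype("https://www.youtube.com/watch?v=abc"))  # -> web_yt
    print(get_webtype("https://example.com/article"))          # -> webpage
```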
pygpt_net/core/modes.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #

 from pygpt_net.provider.core.mode.json_file import JsonFileProvider
@@ -14,6 +14,7 @@ from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
@@ -39,6 +40,7 @@ class Modes:
         MODE_AGENT,
         MODE_AGENT_LLAMA,
         MODE_ASSISTANT,
+        MODE_AUDIO,
         MODE_CHAT,
         MODE_COMPLETION,
         MODE_EXPERT,
pygpt_net/core/presets.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #

 import copy
@@ -17,6 +17,7 @@ from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
@@ -72,6 +73,7 @@ class Presets:
         curr_agent = self.build()
         curr_agent_llama = self.build()
         curr_expert = self.build()
+        curr_audio = self.build()

         # prepare ids
         id_chat = 'current.chat'
@@ -84,6 +86,7 @@ class Presets:
         id_agent = 'current.agent'
         id_agent_llama = 'current.agent_llama'
         id_expert = 'current.expert'
+        id_audio = 'current.audio'

         # set default initial prompt for chat mode
         curr_chat.prompt = self.window.core.prompt.get('default')
@@ -109,6 +112,8 @@ class Presets:
             curr_agent_llama = self.items[id_agent_llama]
         if id_expert in self.items:
             curr_expert = self.items[id_expert]
+        if id_audio in self.items:
+            curr_audio = self.items[id_audio]

         # allow usage in specific mode
         curr_chat.chat = True
@@ -121,6 +126,7 @@ class Presets:
         curr_agent.agent = True
         curr_agent_llama.agent_llama = True
         curr_expert.expert = True
+        curr_audio.audio = True

         # always apply default name
         curr_chat.name = '*'
@@ -133,6 +139,7 @@ class Presets:
         curr_agent.name = '*'
         curr_agent_llama.name = '*'
         curr_expert.name = '*'
+        curr_audio.name = '*'

         # append at first position
         self.items = {
@@ -146,6 +153,7 @@ class Presets:
             id_agent: curr_agent,
             id_agent_llama: curr_agent_llama,
             id_expert: curr_expert,
+            id_audio: curr_audio,
             **self.items
         }

@@ -217,6 +225,8 @@ class Presets:
             return MODE_AGENT_LLAMA
         if preset.expert:
             return MODE_EXPERT
+        if preset.audio:
+            return MODE_AUDIO
         return None

     def has(self, mode: str, id: str) -> bool:
@@ -286,7 +296,8 @@ class Presets:
                     or (mode == MODE_LLAMA_INDEX and self.items[id].llama_index) \
                     or (mode == MODE_AGENT and self.items[id].agent) \
                     or (mode == MODE_AGENT_LLAMA and self.items[id].agent_llama) \
-                    or (mode == MODE_EXPERT and self.items[id].expert)
+                    or (mode == MODE_EXPERT and self.items[id].expert) \
+                    or (mode == MODE_AUDIO and self.items[id].audio):
                 presets[id] = self.items[id]
         return presets

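The presets.py hunks extend the built-in "current.*" presets and the per-mode filtering for the new audio mode: a default `current.audio` preset is injected at the front of the list, and `get_by_mode()` now also matches presets with the audio flag set. A simplified sketch of that bookkeeping (`PresetItem` here is a stand-in, not the real `pygpt_net.item.preset.PresetItem`):

```python
from dataclasses import dataclass


@dataclass
class PresetItem:
    """Minimal stand-in: one boolean flag per mode the preset is enabled for."""
    name: str = ""
    chat: bool = False
    audio: bool = False


def build_defaults() -> dict:
    """Build the built-in 'current.*' presets, including the new audio one."""
    curr_chat = PresetItem(name="*", chat=True)
    curr_audio = PresetItem(name="*", audio=True)
    return {"current.chat": curr_chat, "current.audio": curr_audio}


def get_by_mode(items: dict, mode: str) -> dict:
    """Return only the presets enabled for the given mode ('chat' or 'audio' here)."""
    return {
        pid: preset for pid, preset in items.items()
        if (mode == "chat" and preset.chat) or (mode == "audio" and preset.audio)
    }


if __name__ == "__main__":
    items = build_defaults()
    print(list(get_by_mode(items, "audio")))  # -> ['current.audio']
```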
pygpt_net/core/render/markdown/pid.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #

 class PidData():
@@ -17,6 +17,7 @@ class PidData():
         self.meta = meta
         self.images_appended = []
         self.urls_appended = []
+        self.files_appended = []
         self.buffer = ""
         self.prev_position = None  # previous cursor position (for chunk append)
         self.is_cmd = False
pygpt_net/core/render/plain/pid.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #

 class PidData():
@@ -17,5 +17,6 @@ class PidData():
         self.meta = meta
         self.images_appended = []
         self.urls_appended = []
+        self.files_appended = []
         self.buffer = ""
         self.is_cmd = False
pygpt_net/core/render/web/body.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #

 import os
@@ -167,10 +167,12 @@ class Body:
         if num is not None and num_all is not None and num_all > 1:
             num_str = " [{}]".format(num)
         url, path = self.window.core.filesystem.extract_local_url(url)
-
-        <
+        basename = os.path.basename(path)
+        return """<div class="extra-src-img-box" title="{url}"><div class="img-outer"><div class="img-wrapper"><a href="{url}"><img src="{path}" class="image"></a></div>
+        <a href="{url}" class="title">{title}</a></div></div>""". \
             format(prefix=trans('chat.prefix.img'),
                    url=url,
+                   title=basename,
                    path=path,
                    num=num_str)

@@ -183,13 +185,19 @@ class Body:
         :param num_all: number of all URLs
         :return: HTML code
         """
+        icon_path = os.path.join(
+            self.window.core.config.get_app_path(),
+            "data", "icons", "public_filled.svg"
+        )
+        icon = '<img src="file://{}" width="25" height="25" valign="middle" class="extra-src-icon">'.format(icon_path)
         num_str = ""
         if num is not None and num_all is not None and num_all > 1:
             num_str = " [{}]".format(num)
-        return """<b>{
-            format(
-
-
+        return """{icon}<b>{num}</b> <a href="{url}" title="{url}">{url}</a>""". \
+            format(url=url,
+                   num=num_str,
+                   icon=icon,
+                   )

     def get_docs_html(self, docs: list) -> str:
         """
@@ -223,8 +231,16 @@ class Body:
         except Exception as e:
             pass

+        icon_path = os.path.join(
+            self.window.core.config.get_app_path(),
+            "data", "icons", "db.svg"
+        )
+        icon = '<img src="file://{}" width="25" height="25" valign="middle" class="extra-src-icon">'.format(icon_path)
         if html_sources != "":
-            html += "<p
+            html += "<p>{icon}<small><b>{prefix}:</b></small></p>".format(
+                prefix=trans('chat.prefix.doc'),
+                icon=icon,
+            )
             html += "<div class=\"cmd\">"
             html += "<p>" + html_sources + "</p>"
             html += "</div> "
@@ -239,15 +255,21 @@ class Body:
         :param num_all: number of all files
         :return: HTML code
         """
+        icon_path = os.path.join(
+            self.window.core.config.get_app_path(),
+            "data", "icons", "attachments.svg"
+        )
+        icon = '<img src="file://{}" width="25" height="25" valign="middle" class="extra-src-icon">'.format(icon_path)
         num_str = ""
         if num is not None and num_all is not None and num_all > 1:
             num_str = " [{}]".format(num)
         url, path = self.window.core.filesystem.extract_local_url(url)
-        return """<
-            format(
-            url=url,
+        return """{icon} <b>{num}</b> <a href="{url}">{path}</a>""". \
+            format(url=url,
                    path=path,
-                   num=num_str
+                   num=num_str,
+                   icon=icon,
+                   )

     def prepare_tool_extra(self, ctx: CtxItem) -> str:
         """
pygpt_net/core/render/web/pid.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #

 from pygpt_net.utils import trans
@@ -20,6 +20,7 @@ class PidData():
         self.meta = meta
         self.images_appended = []
         self.urls_appended = []
+        self.files_appended = []
         self.buffer = ""  # stream buffer
         self.is_cmd = False
         self.html = ""  # html buffer
pygpt_net/core/render/web/renderer.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 04:00:00 #
 # ================================================== #

 import json
@@ -420,16 +420,20 @@ class Renderer(BaseRenderer):
         # files and attachments, TODO check attachments
         c = len(ctx.files)
         if c > 0:
+            files_html = []
             n = 1
             for file in ctx.files:
-                if file in appended:
+                if file in appended or file in self.pids[pid].files_appended:
                     continue
                 try:
                     appended.append(file)
-
+                    files_html.append(self.body.get_file_html(file, n, c))
+                    self.pids[pid].files_appended.append(file)
                     n += 1
                 except Exception as e:
                     pass
+            if files_html:
+                html += "<br/>" + "<br/>".join(files_html)

         # urls
         c = len(ctx.urls)
@@ -519,6 +523,7 @@ class Renderer(BaseRenderer):
         self.clear_chunks(pid)
         self.pids[pid].images_appended = []
         self.pids[pid].urls_appended = []
+        self.pids[pid].files_appended = []
         self.get_output_node_by_pid(pid).reset_current_content()
         self.reset_names_by_pid(pid)

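The renderer hunks add a per-stream `files_appended` list so each attachment is rendered once per output stream (pid) and collected into a single HTML block. A trimmed-down sketch of that de-duplication, using stand-in classes rather than the real renderer:

```python
class PidData:
    """Stand-in for the per-stream state object."""

    def __init__(self) -> None:
        self.files_appended = []  # files already rendered for this stream


def render_files(pid_data: PidData, files: list, already_appended: list) -> str:
    """Build the HTML snippet for files not rendered yet; remember them on the stream."""
    files_html = []
    for path in files:
        if path in already_appended or path in pid_data.files_appended:
            continue  # skip duplicates across re-renders
        files_html.append('<a href="{0}">{0}</a>'.format(path))
        pid_data.files_appended.append(path)
    return "<br/>" + "<br/>".join(files_html) if files_html else ""


if __name__ == "__main__":
    pid = PidData()
    print(render_files(pid, ["a.txt", "b.txt"], []))  # renders both files
    print(render_files(pid, ["a.txt", "b.txt"], []))  # renders nothing the second time
```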
pygpt_net/core/tokens.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #

 import tiktoken
@@ -15,6 +15,7 @@ from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_ASSISTANT,
+    MODE_AUDIO,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
@@ -33,6 +34,7 @@ CHAT_MODES = [
     MODE_AGENT,
     MODE_AGENT_LLAMA,
     MODE_EXPERT,
+    MODE_AUDIO,
 ]


@@ -374,7 +376,7 @@ class Tokens:
         model_id = self.window.core.models.get_id(model)
         mode = self.window.core.config.get('mode')
         tokens = 0
-        if mode
+        if mode in [MODE_CHAT, MODE_VISION, MODE_AUDIO]:
             tokens += self.from_prompt(system_prompt, "", model_id)  # system prompt
             tokens += self.from_text("system", model_id)
             tokens += self.from_prompt(input_prompt, "", model_id)  # input prompt
pygpt_net/core/types/mode.py
CHANGED
@@ -6,9 +6,10 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.
+# Updated Date: 2024.11.26 19:00:00 #
 # ================================================== #

+MODE_AUDIO = "audio"
 MODE_CHAT = "chat"
 MODE_COMPLETION = "completion"
 MODE_IMAGE = "img"
pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.4.
-        "app.version": "2.4.
-        "updated_at": "2024-11-
+        "version": "2.4.34",
+        "app.version": "2.4.34",
+        "updated_at": "2024-11-26T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -65,6 +65,7 @@
     "assistant": "",
     "assistant_thread": "",
     "assistant.store.hide_threads": true,
+    "attachments_auto_index": false,
     "attachments_send_clear": true,
     "attachments_capture_clear": true,
     "audio.transcribe.convert_video": true,
@@ -74,6 +75,7 @@
     "ctx.attachment.img": false,
     "ctx.attachment.mode": "full",
     "ctx.attachment.summary.model": "gpt-4o-mini",
+    "ctx.attachment.query.model": "gpt-4o-mini",
     "ctx.attachment.verbose": false,
     "ctx.auto_summary": true,
     "ctx.auto_summary.model": "gpt-4o-mini",