pygpt-net 2.5.1__py3-none-any.whl → 2.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +5 -0
- README.md +6 -1
- pygpt_net/CHANGELOG.txt +5 -0
- pygpt_net/__init__.py +1 -1
- pygpt_net/controller/chat/image.py +8 -2
- pygpt_net/controller/chat/input.py +9 -1
- pygpt_net/controller/config/placeholder.py +7 -2
- pygpt_net/controller/model/__init__.py +5 -2
- pygpt_net/core/command/__init__.py +8 -2
- pygpt_net/core/models/__init__.py +3 -1
- pygpt_net/core/models/ollama.py +47 -0
- pygpt_net/core/prompt/__init__.py +38 -13
- pygpt_net/core/prompt/base/__init__.py +70 -0
- pygpt_net/core/prompt/base/gpt.py +46 -0
- pygpt_net/core/prompt/template.py +6 -2
- pygpt_net/data/config/config.json +2 -2
- pygpt_net/data/config/models.json +12 -12
- pygpt_net/data/config/modes.json +2 -2
- pygpt_net/data/locale/locale.de.ini +1 -1
- pygpt_net/data/locale/locale.en.ini +1 -1
- pygpt_net/data/locale/locale.es.ini +1 -1
- pygpt_net/data/locale/locale.fr.ini +1 -1
- pygpt_net/data/locale/locale.it.ini +1 -1
- pygpt_net/data/locale/locale.pl.ini +1 -1
- pygpt_net/data/locale/locale.uk.ini +1 -1
- pygpt_net/data/locale/locale.zh.ini +1 -1
- pygpt_net/plugin/openai_dalle/__init__.py +2 -2
- pygpt_net/provider/core/model/patch.py +10 -1
- {pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/METADATA +7 -2
- {pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/RECORD +33 -30
- {pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/entry_points.txt +0 -0
CHANGELOG.md
CHANGED
README.md
CHANGED
@@ -2,7 +2,7 @@
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.5.
+Release: **2.5.2** | build: **2025.02.01** | Python: **>=3.10, <3.13**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -3960,6 +3960,11 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.5.2 (2025-02-01)**
+
+- Fix: spinner update after inline image generation.
+- Added Ollama suffix to Ollama-models in models list.
+
 **2.5.1 (2025-02-01)**
 
 - PySide6 upgraded to 6.6.2.
pygpt_net/CHANGELOG.txt
CHANGED
pygpt_net/__init__.py
CHANGED
@@ -13,7 +13,7 @@ __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.5.
+__version__ = "2.5.2"
 __build__ = "2025.02.01"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
pygpt_net/controller/chat/image.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 from typing import Optional, List
@@ -232,9 +232,12 @@ class Image:
 })
 self.window.dispatch(event)
 self.window.controller.chat.common.unlock_input() # unlock input
+
+event = RenderEvent(RenderEvent.TOOL_UPDATE, data)
+self.window.dispatch(event) # end of tool, hide spinner icon
 return
 
-# NOT internal-mode, user called, so append only img output to chat (show images now)
+# NOT internal-mode, user called, so append only img output to chat (show images now):
 
 data = {
 "meta": ctx.meta,
@@ -247,3 +250,6 @@ class Image:
 self.window.dispatch(event) # end extra
 
 self.window.controller.chat.common.unlock_input() # unlock input
+
+event = RenderEvent(RenderEvent.TOOL_UPDATE, data)
+self.window.dispatch(event) # end of tool, hide spinner icon
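Both hunks apply the same fix: once the input field is unlocked after inline image generation, a RenderEvent.TOOL_UPDATE is dispatched so the renderer hides the leftover tool spinner. A condensed sketch of that flow, assuming only the event bus shown in this diff; the helper name is hypothetical:

```python
# Sketch of the 2.5.2 spinner fix; `finish_image_output` is a hypothetical helper
# wrapping the two statements added at the end of both branches in image.py.
from pygpt_net.core.events import RenderEvent  # import path as used elsewhere in this release

def finish_image_output(window, data: dict):
    window.controller.chat.common.unlock_input()        # unlock input (already present)
    event = RenderEvent(RenderEvent.TOOL_UPDATE, data)   # added in 2.5.2
    window.dispatch(event)                               # end of tool, hide spinner icon
```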
pygpt_net/controller/chat/input.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 from typing import Optional, Any, Dict
@@ -17,6 +17,7 @@ from pygpt_net.core.events import Event, AppEvent, KernelEvent, RenderEvent
 from pygpt_net.core.types import (
 MODE_AGENT,
 MODE_AGENT_LLAMA,
+MODE_LLAMA_INDEX,
 MODE_ASSISTANT,
 MODE_IMAGE,
 )
@@ -73,6 +74,13 @@ class Input:
 self.window.controller.agent.common.display_infinity_loop_confirm()
 return
 
+# TODO: check ollama status
+"""
+if mode == MODE_LLAMA_INDEX:
+status = self.window.core.models.ollama.get_status()
+print("Ollama status: {}".format(status))
+"""
+
 # listen for stop command
 if self.generating \
 and text is not None \
pygpt_net/controller/config/placeholder.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, List
@@ -267,7 +267,12 @@ class Placeholder:
 models = self.window.core.models.get_all()
 data = []
 for id in models:
-
+model = models[id]
+suffix = ""
+if "provider" in model.llama_index and model.llama_index["provider"] == "ollama":
+suffix = " [Ollama]"
+name = model.name + suffix
+data.append({id: name})
 return data
 
 def get_agent_modes(self) -> List[Dict[str, str]]:
pygpt_net/controller/model/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 from typing import Optional
@@ -157,7 +157,10 @@ class Model:
 items = {}
 data = self.window.core.models.get_by_mode(mode)
 for k in data:
-
+suffix = ""
+if "provider" in data[k].llama_index and data[k].llama_index["provider"] == "ollama":
+suffix = " [Ollama]"
+items[k] = data[k].name + suffix
 self.window.ui.nodes["prompt.model"].set_keys(items)
 
 def update(self):
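The placeholder and model controllers apply the same labelling rule described in the 2.5.2 changelog: models whose llama_index provider is Ollama get an " [Ollama]" suffix in the model list. A minimal sketch of that rule in isolation, assuming llama_index is a plain dict of per-provider settings on the model item (as the two hunks above suggest):

```python
# Stand-alone restatement of the "[Ollama]" suffix rule added in 2.5.2.
def display_name(name: str, llama_index: dict) -> str:
    suffix = ""
    if llama_index.get("provider") == "ollama":
        suffix = " [Ollama]"
    return name + suffix

print(display_name("deepseek-r1-7b", {"provider": "ollama"}))  # -> "deepseek-r1-7b [Ollama]"
print(display_name("gpt-4o-mini", {}))                         # -> "gpt-4o-mini"
```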
pygpt_net/core/command/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 import copy
@@ -39,12 +39,16 @@ class Command:
 
 def append_syntax(
 self,
-data: Dict[str, Any]
+data: Dict[str, Any],
+mode: str = None,
+model: ModelItem = None
 ) -> str:
 """
 Append command syntax to the system prompt
 
 :param data: event data
+:param mode: mode
+:param model: model item
 :return: prompt with appended syntax
 """
 prompt = data['prompt']
@@ -603,6 +607,7 @@ class Command:
 data = {
 'prompt': "",
 'silent': True,
+'force': True,
 'syntax': [],
 'cmd': [],
 }
@@ -616,6 +621,7 @@ class Command:
 data = {
 'prompt': "",
 'silent': True,
+'force': True,
 'syntax': [],
 'cmd': [],
 }
pygpt_net/core/models/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 import copy
@@ -22,6 +22,7 @@ from pygpt_net.core.types import (
 from pygpt_net.item.model import ModelItem
 from pygpt_net.provider.core.model.json_file import JsonFileProvider
 
+from .ollama import Ollama
 
 class Models:
 def __init__(self, window=None):
@@ -32,6 +33,7 @@ class Models:
 """
 self.window = window
 self.provider = JsonFileProvider(window)
+self.ollama = Ollama(window)
 self.default = "gpt-4o-mini"
 self.items = {}
 
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.02.01 11:00:00 #
+# ================================================== #
+
+import requests
+
+class Ollama:
+    def __init__(self, window=None):
+        """
+        Ollama core
+
+        :param window: Window instance
+        """
+        self.window = window
+
+    def get_status(self) -> dict:
+        """
+        Check Ollama status
+
+        :return: dict
+        """
+        url = "http://localhost:11434/api/tags"
+        try:
+            response = requests.get(url, timeout=2)
+            if response.status_code == 200:
+                data = response.json()
+                return {
+                    'status': True,
+                    'models': data.get('models', [])
+                }
+            else:
+                return {
+                    'status': False,
+                    'models': []
+                }
+        except requests.exceptions.RequestException:
+            return {
+                'status': False,
+                'models': []
+            }
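The new core class is exposed as window.core.models.ollama (wired up in core/models/__init__.py above) and is what the commented-out status check in controller/chat/input.py would call. A short stand-alone usage sketch of the same probe; the endpoint and response shape come from the file above, and running it naturally requires a local Ollama server:

```python
# Probe a local Ollama server the same way core/models/ollama.py does.
import requests

def ollama_status(base_url: str = "http://localhost:11434") -> dict:
    try:
        resp = requests.get(f"{base_url}/api/tags", timeout=2)
        if resp.status_code == 200:
            return {"status": True, "models": resp.json().get("models", [])}
    except requests.exceptions.RequestException:
        pass
    return {"status": False, "models": []}

status = ollama_status()
if status["status"]:
    # each entry returned by /api/tags carries a "name" field (e.g. "deepseek-r1:7b")
    print("Ollama is up:", [m.get("name") for m in status["models"]])
else:
    print("Ollama is not reachable on localhost:11434")
```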
pygpt_net/core/prompt/__init__.py
CHANGED
@@ -6,16 +6,16 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 from pygpt_net.core.events import Event
-
 from pygpt_net.item.ctx import CtxItem
+from pygpt_net.item.model import ModelItem
 
+from .base import Base
 from .custom import Custom
 from .template import Template
-from pygpt_net.item.model import ModelItem
 
 
 class Prompt:
@@ -26,20 +26,29 @@ class Prompt:
 :param window: Window instance
 """
 self.window = window
+self.base = Base(window)
 self.custom = Custom(window)
 self.template = Template(window)
 
-def get(
+def get(
+self,
+prompt: str,
+mode: str = None,
+model: ModelItem = None
+) -> str:
 """
-Get prompt content
+Get system prompt content
 
 :param prompt: id of the prompt
+:param mode: mode
+:param model: model item
 :return: text content
 """
-
-
-
-
+return self.base.get(
+prompt=prompt,
+mode=mode,
+model=model,
+)
 
 def build_final_system_prompt(
 self,
@@ -90,14 +99,22 @@ class Prompt:
 event = Event(Event.CMD_SYNTAX, data)
 self.window.dispatch(event)
 if event.data and "cmd" in event.data and event.data["cmd"]:
-prompt = self.window.core.command.append_syntax(
+prompt = self.window.core.command.append_syntax(
+data=event.data,
+mode=mode,
+model=model,
+)
 
 # inline cmd syntax only
 elif self.window.controller.plugins.is_type_enabled("cmd.inline"):
 event = Event(Event.CMD_SYNTAX_INLINE, data)
 self.window.dispatch(event)
 if event.data and "cmd" in event.data and event.data["cmd"]:
-prompt = self.window.core.command.append_syntax(
+prompt = self.window.core.command.append_syntax(
+data=event.data,
+mode=mode,
+model=model,
+)
 
 return prompt
 
@@ -168,13 +185,21 @@ class Prompt:
 event = Event(Event.CMD_SYNTAX, data)
 self.window.dispatch(event)
 if event.data and "cmd" in event.data and event.data["cmd"]:
-sys_prompt = self.window.core.command.append_syntax(
+sys_prompt = self.window.core.command.append_syntax(
+data=event.data,
+mode=mode,
+model=model,
+)
 
 # inline cmd syntax only
 elif self.window.controller.plugins.is_type_enabled("cmd.inline"):
 event = Event(Event.CMD_SYNTAX_INLINE, data)
 self.window.dispatch(event)
 if event.data and "cmd" in event.data and event.data["cmd"]:
-sys_prompt = self.window.core.command.append_syntax(
+sys_prompt = self.window.core.command.append_syntax(
+data=event.data,
+mode=mode,
+model=model,
+)
 
 return sys_prompt
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.02.01 11:00:00 #
+# ================================================== #
+
+from pygpt_net.item.model import ModelItem
+from .gpt import Gpt
+
+class Base:
+    def __init__(self, window=None):
+        """
+        Base prompt templates
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.providers = {
+            "gpt" : Gpt(window)
+        }
+        self.model_prompts = [
+            "cmd",
+            "cmd.extra",
+            "cmd.extra.assistants",
+        ]
+
+    def get(
+            self,
+            prompt: str,
+            mode: str = None,
+            model: ModelItem = None
+    ) -> str:
+        """
+        Get system prompt content
+
+        CMD/TOOL EXECUTE prompts:
+        - cmd
+        - cmd.extra
+        - cmd.extra.assistants
+
+        :param prompt: id of the prompt
+        :param mode: mode
+        :param model: model item
+        :return: text content
+        """
+        return self.get_db(prompt, mode, model)  # from db
+
+    def get_db(
+            self,
+            prompt: str,
+            mode: str = None,
+            model: ModelItem = None
+    ) -> str:
+        """
+        Get system prompt content from database
+
+        :param prompt: id of the prompt
+        :param mode: mode
+        :param model: model item
+        :return: text content
+        """
+        key = "prompt." + prompt
+        if self.window.core.config.has(key):
+            return str(self.window.core.config.get(key))
+        return ""
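Base resolves every prompt id to a config key ("prompt." + id), so the cmd/tool templates can be overridden per installation; the mode and model arguments are accepted but not yet used to pick a provider (the providers dict only registers Gpt so far). A minimal sketch of the lookup, with a plain dict standing in for window.core.config:

```python
# Sketch of the "prompt.<id>" lookup performed by Base.get_db(); `config` is a
# stand-in for window.core.config, with keys as listed in model_prompts above.
config = {
    "prompt.cmd": "You can call the following tools ...",
    "prompt.cmd.extra": "Additional tool instructions ...",
}

def get_prompt(prompt_id: str, config: dict) -> str:
    key = "prompt." + prompt_id
    return str(config[key]) if key in config else ""

print(get_prompt("cmd", config))      # stored template text
print(get_prompt("unknown", config))  # "" when the key is missing
```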
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.02.01 11:00:00 #
+# ================================================== #
+
+from pygpt_net.item.model import ModelItem
+
+
+class Gpt:
+    def __init__(self, window=None):
+        """
+        GPT prompt templates
+
+        :param window: Window instance
+        """
+        self.window = window
+
+    def get(
+            self,
+            prompt: str,
+            mode: str = None,
+            model: ModelItem = None
+    ) -> str:
+        """
+        Get system prompt content
+
+        CMD/TOOL EXECUTE prompts:
+        - cmd
+        - cmd.extra
+        - cmd.extra.assistants
+
+        :param prompt: id of the prompt
+        :param mode: mode
+        :param model: model item
+        :return: text content
+        """
+        key = "prompt." + prompt
+        if self.window.core.config.has(key):
+            return str(self.window.core.config.get(key))
+        return ""
pygpt_net/core/prompt/template.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 import os
@@ -62,7 +62,11 @@ class Template:
 # sort by name
 self.prompts = dict(sorted(self.prompts.items(), key=lambda item: item[1]['name']))
 
-def to_menu_options(
+def to_menu_options(
+self,
+menu,
+parent: str = "global"
+):
 """
 Convert prompts to menu options
 
pygpt_net/data/config/config.json
CHANGED
@@ -1,7 +1,7 @@
 {
 "__meta__": {
-"version": "2.5.
-"app.version": "2.5.
+"version": "2.5.2",
+"app.version": "2.5.2",
 "updated_at": "2025-02-01T00:00:00"
 },
 "items": {
pygpt_net/data/config/models.json
CHANGED
@@ -265,7 +265,7 @@
 },
 "deepseek_ollama_r1_1.5b": {
 "id": "deepseek-r1-1.5b",
-"name": "
+"name": "deepseek-r1-1.5b",
 "mode": [
 "llama_index",
 "agent",
@@ -316,7 +316,7 @@
 },
 "deepseek_ollama_r1_7b": {
 "id": "deepseek-r1-7b",
-"name": "
+"name": "deepseek-r1-7b",
 "mode": [
 "llama_index",
 "agent",
@@ -367,7 +367,7 @@
 },
 "deepseek_ollama_r1_14b": {
 "id": "deepseek-r1-14b",
-"name": "
+"name": "deepseek-r1-14b",
 "mode": [
 "llama_index",
 "agent",
@@ -418,7 +418,7 @@
 },
 "deepseek_ollama_r1_32b": {
 "id": "deepseek-r1-32b",
-"name": "
+"name": "deepseek-r1-32b",
 "mode": [
 "llama_index",
 "agent",
@@ -469,7 +469,7 @@
 },
 "deepseek_ollama_r1_70b": {
 "id": "deepseek-r1-70b",
-"name": "
+"name": "deepseek-r1-70b",
 "mode": [
 "llama_index",
 "agent",
@@ -520,7 +520,7 @@
 },
 "deepseek_ollama_r1_671b": {
 "id": "deepseek-r1-671b",
-"name": "
+"name": "deepseek-r1-671b",
 "mode": [
 "llama_index",
 "agent",
@@ -570,8 +570,8 @@
 "default": false
 },
 "deepseek_ollama_v3": {
-"id": "deepseek-v3",
-"name": "
+"id": "deepseek-v3:671b",
+"name": "deepseek-v3:671b",
 "mode": [
 "llama_index",
 "agent",
@@ -586,7 +586,7 @@
 "args": [
 {
 "name": "model",
-"value": "deepseek-v3",
+"value": "deepseek-v3:671b",
 "type": "str"
 }
 ],
@@ -605,7 +605,7 @@
 "args": [
 {
 "name": "model",
-"value": "deepseek-v3",
+"value": "deepseek-v3:671b",
 "type": "str"
 }
 ],
pygpt_net/data/config/modes.json
CHANGED
pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -966,7 +966,7 @@ tip.tokens.input = Token: Benutzereingabeaufforderung + Systemaufforderung + Kon
 tip.toolbox.assistants = Die Liste der Assistenten zeigt die erstellten Assistenten, die auf dem entfernten Server arbeiten. Alle Änderungen werden mit dem entfernten Assistenten synchronisiert.
 tip.toolbox.ctx = Erstellen Sie so viele Gesprächskontexte, wie Sie benötigen; Sie können jederzeit zu ihnen zurückkehren.
 tip.toolbox.indexes = Durch das Indizieren von Gesprächen und Dateien können Sie das verfügbare Wissen mit Ihren eigenen Daten und Gesprächsverläufen erweitern.
-tip.toolbox.mode = Sie können den Arbeitsmodus und das Modell in Echtzeit ändern.
+tip.toolbox.mode = Sie können den Arbeitsmodus und das Modell in Echtzeit ändern. Um andere Modelle als GPT zu verwenden, nutzen Sie den Modus Chat mit Dateien.
 tip.toolbox.presets = Erstellen Sie Voreinstellungen mit verschiedenen Konfigurationen, um schnell zwischen verschiedenen Einstellungen wie dem Systemprompt und anderen zu wechseln.
 tip.toolbox.prompt = Die aktuelle Systemeingabeaufforderung kann in Echtzeit geändert werden. Um Werkzeuge aus Plugins zu aktivieren, aktivieren Sie die Option "+ Werkzeuge."
 toolbox.agent.auto_stop.label = Automatischer Stopp
pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -1210,7 +1210,7 @@ tip.tokens.input = Tokens: input prompt + system prompt + context + extra + atta
 tip.toolbox.assistants = The list of assistants shows the assistants created and operating on the remote server. Any changes will be synchronized with the remote assistant.
 tip.toolbox.ctx = Create as many conversation contexts as you need; you can return to them at any time.
 tip.toolbox.indexes = By indexing conversations and files, you can expand the available knowledge with your own data and conversation history.
-tip.toolbox.mode = You can change the working mode and model in real-time.
+tip.toolbox.mode = You can change the working mode and model in real-time. To use models other than GPT, use the Chat with Files mode.
 tip.toolbox.presets = Create presets with different configurations to quickly switch between various settings, such as the system prompt and others.
 tip.toolbox.prompt = The current system prompt can be modified in real-time. To enable tools from plugins, enable the option "+ Tools."
 toolbox.agent.auto_stop.label = Auto-stop
pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -966,7 +966,7 @@ tip.tokens.input = Fichas: indicación del usuario + indicación del sistema + c
 tip.toolbox.assistants = La lista de asistentes muestra los asistentes creados y operando en el servidor remoto. Cualquier cambio se sincronizará con el asistente remoto.
 tip.toolbox.ctx = Crea tantos contextos de conversación como necesites; puedes volver a ellos en cualquier momento.
 tip.toolbox.indexes = Al indexar conversaciones y archivos, puedes ampliar el conocimiento disponible con tus propios datos e historial de conversaciones.
-tip.toolbox.mode = Puedes cambiar el modo de trabajo y el modelo en tiempo real.
+tip.toolbox.mode = Puedes cambiar el modo de trabajo y el modelo en tiempo real. Para usar modelos distintos a GPT, utiliza el modo Chat con archivos.
 tip.toolbox.presets = Crea preajustes con diferentes configuraciones para cambiar rápidamente entre varios ajustes, como el prompt del sistema y otros.
 tip.toolbox.prompt = La solicitud del sistema actual se puede modificar en tiempo real. Para habilitar herramientas desde complementos, habilite la opción "+ Herramientas."
 toolbox.agent.auto_stop.label = Auto-parada
pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -966,7 +966,7 @@ tip.tokens.input = Jetons: invite de l'utilisateur + invite système + contexte
 tip.toolbox.assistants = La liste des assistants montre les assistants créés et opérant sur le serveur distant. Tout changement sera synchronisé avec l'assistant distant.
 tip.toolbox.ctx = Créez autant de contextes de conversation que vous en avez besoin ; vous pouvez y revenir à tout moment.
 tip.toolbox.indexes = En indexant des conversations et des fichiers, vous pouvez étendre les connaissances disponibles avec vos propres données et historique de conversation.
-tip.toolbox.mode = Vous pouvez changer le mode de travail et le modèle en temps réel.
+tip.toolbox.mode = Vous pouvez changer le mode de travail et le modèle en temps réel. Pour utiliser des modèles autres que GPT, utilisez le mode Chat avec fichiers.
 tip.toolbox.presets = Créez des préréglages avec différentes configurations pour basculer rapidement entre divers réglages, tels que l'invite système et d'autres.
 tip.toolbox.prompt = L'invite système actuelle peut être modifiée en temps réel. Pour activer les outils à partir des plugins, activez l'option "+ Outils."
 toolbox.agent.auto_stop.label = Arrêt automatique
pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -966,7 +966,7 @@ tip.tokens.input = Gettoni: prompt dell'utente + prompt di sistema + contesto +
 tip.toolbox.assistants = L'elenco degli assistenti mostra gli assistenti creati e operanti sul server remoto. Tutte le modifiche saranno sincronizzate con l'assistente remoto.
 tip.toolbox.ctx = Crea quanti contesti di conversazione hai bisogno; puoi ritornarci in qualsiasi momento.
 tip.toolbox.indexes = Indicizzando conversazioni e file, puoi espandere le conoscenze disponibili con i tuoi propri dati e storico delle conversazioni.
-tip.toolbox.mode = Puoi cambiare la modalità di lavoro e il modello in tempo reale.
+tip.toolbox.mode = Puoi cambiare la modalità di lavoro e il modello in tempo reale. Per utilizzare modelli diversi da GPT, usa la modalità Chat con file.
 tip.toolbox.presets = Crea preset con diverse configurazioni per passare rapidamente tra varie impostazioni, come il prompt del sistema e altri.
 tip.toolbox.prompt = Il prompt di sistema corrente può essere modificato in tempo reale. Per abilitare strumenti dai plugin, abilita l'opzione "+ Strumenti."
 toolbox.agent.auto_stop.label = Arresto automatico
pygpt_net/data/locale/locale.pl.ini
CHANGED
@@ -967,7 +967,7 @@ tip.tokens.input = Tokeny: prompt użytkownika + systemowy prompt + kontekst + d
 tip.toolbox.assistants = Lista asystentów pokazuje asystentów stworzonych i działających na zdalnym serwerze. Wszelkie zmiany zostaną zsynchronizowane ze zdalnym asystentem.
 tip.toolbox.ctx = Twórz tyle kontekstów rozmów, ile potrzebujesz; możesz do nich wrócić w dowolnym momencie.
 tip.toolbox.indexes = Indeksując rozmowy i pliki, możesz rozszerzyć dostępną wiedzę o własne dane i historię rozmów.
-tip.toolbox.mode = Możesz zmienić tryb pracy i model w czasie rzeczywistym.
+tip.toolbox.mode = Możesz zmienić tryb pracy i model w czasie rzeczywistym. Aby użyć modeli innych niż GPT, użyj trybu Czat z plikami.
 tip.toolbox.presets = Twórz presety z różnymi konfiguracjami, aby szybko przełączać się między różnymi ustawieniami, takimi jak prompt systemowy i inne.
 tip.toolbox.prompt = Aktualna podpowiedź systemu może być modyfikowana w czasie rzeczywistym. Aby włączyć narzędzia z wtyczek, włącz opcję "+ Narzędzia."
 toolbox.agent.auto_stop.label = Auto-stop
pygpt_net/data/locale/locale.uk.ini
CHANGED
@@ -966,7 +966,7 @@ tip.tokens.input = Токени: запит користувача + систе
 tip.toolbox.assistants = Список асистентів показує асистентів, створених і що працюють на віддаленому сервері. Будь-які зміни будуть синхронізовані з віддаленим асистентом.
 tip.toolbox.ctx = Створіть стільки контекстів розмов, як вам потрібно; ви можете повернутися до них у будь-який час.
 tip.toolbox.indexes = Індексуючи розмови та файли, ви можете розширити доступні знання зі своїми власними даними та історією розмов.
-tip.toolbox.mode = Ви можете змінити робочий режим та модель в реальному часі.
+tip.toolbox.mode = Ви можете змінити робочий режим та модель в реальному часі. Щоб використовувати моделі, відмінні від GPT, використовуйте режим Чат з файлами.
 tip.toolbox.presets = Створіть пресети з різними конфігураціями для швидкого перемикання між різними налаштуваннями, такими як системний сповіщення та інші.
 tip.toolbox.prompt = Поточну системну підказку можна змінювати в режимі реального часу. Щоб увімкнути інструменти з плагінів, увімкніть опцію "+ Інструменти."
 toolbox.agent.auto_stop.label = Авто-стоп
pygpt_net/data/locale/locale.zh.ini
CHANGED
@@ -1082,7 +1082,7 @@ tip.tokens.input = 代币:用户输入提示 + 系统提示 + 上下文 + 额
 tip.toolbox.assistants = 助手列表顯示在遠程服務器上創建和運行的助手。任何更改都將與遠程助手同步。
 tip.toolbox.ctx = 創建所需數量的對話上下文;您隨時可以返回它們。
 tip.toolbox.indexes = 通過索引對話和文件,您可以用自己的數據和對話歷史擴展可用知識。
-tip.toolbox.mode =
+tip.toolbox.mode = 您可以實時更換工作模式和模型。要使用非GPT模型,请使用“文件聊天模式”模式。
 tip.toolbox.presets = 創建具有不同配置的預設,以便快速切換不同設置,例如系統提示等。
 tip.toolbox.prompt = 当前系统提示可以实时修改。要启用来自插件的工具,请启用“+ 工具”选项。
 toolbox.agent.auto_stop.label = 自動停止
pygpt_net/plugin/openai_dalle/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 from pygpt_net.core.types import (
@@ -86,7 +86,7 @@ class Plugin(BasePlugin):
 Event.CMD_SYNTAX_INLINE, # inline is allowed
 Event.CMD_SYNTAX,
 ]:
-if not self.is_native_cmd(): # only if native commands are enabled, otherwise use prompt only
+if not self.is_native_cmd() and "force" not in data: # only if native commands are enabled, otherwise use prompt only
 return
 
 self.cmd_syntax(data)
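Together with the 'force': True entries now added to the event data in core/command/__init__.py, this guard lets the plugin contribute its command syntax even when is_native_cmd() returns False. A condensed, stand-alone restatement of that guard (is_native_cmd() is replaced by a plain boolean here):

```python
# Mirror of the 2.5.2 guard in openai_dalle: skip adding syntax unless native
# commands are enabled or the core explicitly set "force" in the event data.
def handles_syntax_event(native_cmd: bool, data: dict) -> bool:
    if not native_cmd and "force" not in data:
        return False
    return True

print(handles_syntax_event(False, {'prompt': "", 'silent': True}))                 # False
print(handles_syntax_event(False, {'prompt': "", 'silent': True, 'force': True}))  # True
print(handles_syntax_event(True,  {'prompt': "", 'silent': True}))                 # True
```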
pygpt_net/provider/core/model/patch.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01
+# Updated Date: 2025.02.01 11:00:00 #
 # ================================================== #
 
 from packaging.version import parse as parse_version, Version
@@ -481,6 +481,15 @@ class Patch:
 # add o1, DeepSeek R1, V3
 updated = True
 
+# < 2.5.2 <--- update names to models IDs
+if old < parse_version("2.5.2"):
+print("Migrating models from < 2.5.2...")
+for id in data:
+model = data[id]
+if model.name.startswith("DeepSeek Ollama"):
+model.name = model.id
+updated = True
+
 # update file
 if updated:
 data = dict(sorted(data.items()))
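The migration is gated on the previously installed version and simply replaces the old "DeepSeek Ollama …" display names with the model IDs, which then receive the [Ollama] suffix at display time. A self-contained sketch of that version gate, using the same packaging helper; ModelRec stands in for pygpt's ModelItem:

```python
# Sketch of the < 2.5.2 model-name migration; ModelRec is a stand-in for ModelItem.
from dataclasses import dataclass
from packaging.version import parse as parse_version

@dataclass
class ModelRec:
    id: str
    name: str

def migrate(old_version: str, data: dict) -> bool:
    updated = False
    if parse_version(old_version) < parse_version("2.5.2"):
        for model in data.values():
            if model.name.startswith("DeepSeek Ollama"):
                model.name = model.id   # display names now equal model IDs
                updated = True
    return updated

models = {"deepseek_ollama_r1_7b": ModelRec("deepseek-r1-7b", "DeepSeek Ollama R1 7b")}
print(migrate("2.5.1", models), models["deepseek_ollama_r1_7b"].name)
# -> True deepseek-r1-7b
```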
{pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pygpt-net
-Version: 2.5.
+Version: 2.5.2
 Summary: Desktop AI Assistant powered by models: OpenAI o1, GPT-4o, GPT-4, GPT-4 Vision, GPT-3.5, DALL-E 3, Llama 3, Mistral, Gemini, Claude, DeepSeek, Bielik, and other models supported by Langchain, Llama Index, and Ollama. Features include chatbot, text completion, image generation, vision analysis, speech-to-text, internet access, file handling, command execution and more.
 Home-page: https://pygpt.net
 License: MIT
@@ -94,7 +94,7 @@ Description-Content-Type: text/markdown
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.5.
+Release: **2.5.2** | build: **2025.02.01** | Python: **>=3.10, <3.13**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -4052,6 +4052,11 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.5.2 (2025-02-01)**
+
+- Fix: spinner update after inline image generation.
+- Added Ollama suffix to Ollama-models in models list.
+
 **2.5.1 (2025-02-01)**
 
 - PySide6 upgraded to 6.6.2.
{pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
-CHANGELOG.md,sha256=
-README.md,sha256=
+CHANGELOG.md,sha256=RoNGA5ISzeyNYy3--2oqecKqpEZfwWQKiUbsL2vtf94,82641
+README.md,sha256=dkYi2fx_OMyuezDKGRD0oNnpiT5kHqfZoIS6enq9ERI,163686
 icon.png,sha256=CzcINJaU23a9hNjsDlDNbyuiEvKZ4Wg6DQVYF6SpuRg,13970
-pygpt_net/CHANGELOG.txt,sha256=
+pygpt_net/CHANGELOG.txt,sha256=MKbqVBddbfbF65ctou2uBlcatkNjjseyLy9lqZ5fzj4,81125
 pygpt_net/LICENSE,sha256=dz9sfFgYahvu2NZbx4C1xCsVn9GVer2wXcMkFRBvqzY,1146
-pygpt_net/__init__.py,sha256=
+pygpt_net/__init__.py,sha256=P2LrRTQJye9esdRAsPS9bRODtg6k4yjZT14q84m6Ft0,1372
 pygpt_net/app.py,sha256=XXjn9XaKHGRcsHN8mMuqbRHAg8_Da0GLmACUU9ddjBc,16217
 pygpt_net/config.py,sha256=Qc1FOBtTf3O6A6-6KoqUGtoJ0u8hXQeowvCVbZFwtik,16405
 pygpt_net/container.py,sha256=BemiVZPpPNIzfB-ZvnZeeBPFu-AcX2c30OqYFylEjJc,4023
@@ -33,8 +33,8 @@ pygpt_net/controller/chat/audio.py,sha256=QsU36McxqlRoP6B-NSeck968g1M8JhlLkLwGLu
 pygpt_net/controller/chat/command.py,sha256=3fNYvgt9NmCZPNrlqfLXivmN63ZRsuFseNjj1ZLnwts,3293
 pygpt_net/controller/chat/common.py,sha256=-BJa2wHRHLDY-BHgp36GFhSr0yGNclkJGMiJQDktHaA,13958
 pygpt_net/controller/chat/files.py,sha256=VFiiTeWTYR15Nwf1CTLEmeXqlmRHzNQVkNaU6hY2Gz4,2846
-pygpt_net/controller/chat/image.py,sha256=
-pygpt_net/controller/chat/input.py,sha256=
+pygpt_net/controller/chat/image.py,sha256=XghPvTP8n3DxFgRrZ3bCfeLNTvOKSxbl-ewTwUTd4HM,8308
+pygpt_net/controller/chat/input.py,sha256=FD3s1ytwvxSIje_e50BPNGq9UUV9XNhk0qUvoaNMeFo,10244
 pygpt_net/controller/chat/output.py,sha256=VuziVuI9Lj_4kZmTWvXg8t2tq4w9uD7J1g2MqlMCV6s,9272
 pygpt_net/controller/chat/render.py,sha256=h23QCvMDIAaCpInqwwADa4G43sSpSn-CE5celnk1LSc,17206
 pygpt_net/controller/chat/response.py,sha256=UnTnnn2on-Qg2_T_QcQcklTCcuq6XhyLLxs1fn-D9Tg,9450
@@ -51,7 +51,7 @@ pygpt_net/controller/config/field/dictionary.py,sha256=E8b3Quid_kdQQ54fuZJ4GdcPo
 pygpt_net/controller/config/field/input.py,sha256=081bzm0-MSN6UYIsyDS4gAEhgojWswCNjGqTyyg4Ric,3663
 pygpt_net/controller/config/field/slider.py,sha256=2XToxPkIvfRPcANa-um6HDQ8rLqpDGynDN05ocKdE1w,4692
 pygpt_net/controller/config/field/textarea.py,sha256=CySGd21ljR3v3DX9V2WwSvmKVEihD547rnHiRngWuuc,2398
-pygpt_net/controller/config/placeholder.py,sha256=
+pygpt_net/controller/config/placeholder.py,sha256=lhr7bqIT8G-UolQo05kxzaOiOakeHlMiCCkbIOReoHk,12532
 pygpt_net/controller/ctx/__init__.py,sha256=K1LslZqoIPtTphLDcrTxiqqJ6Fx1EZkJkbPEUCRws9Y,33913
 pygpt_net/controller/ctx/common.py,sha256=yz1s4kVfxlRpd0XW_sygbmer66LqLxQA1e6O8Pz7FL4,6380
 pygpt_net/controller/ctx/extra.py,sha256=eDl0_iu80pRtyMX5ub52mjvOo_xo-vb1kZmTt0Idyoo,8219
@@ -78,7 +78,7 @@ pygpt_net/controller/lang/settings.py,sha256=awPEshWbHlOt11Zyg_uQKlbYjvABXrQ7QMH
 pygpt_net/controller/launcher/__init__.py,sha256=om6aEZx31cSiCuShnDHp5Fs-Lj6Rb_pmbOO5fBweEWU,1899
 pygpt_net/controller/layout/__init__.py,sha256=9R30zrZtvedAf1OxQLzxDWt8o2XirUq0bkcFRnpCztg,11433
 pygpt_net/controller/mode/__init__.py,sha256=TY3y5fD8kpqLCmDyyCoEL_1OTSOXLnHVdIvH2lGUTew,7303
-pygpt_net/controller/model/__init__.py,sha256=
+pygpt_net/controller/model/__init__.py,sha256=W1JOEL_hk6kcGyQGDRmwtaTLndQAC8x7S2KMzSyWLJM,5839
 pygpt_net/controller/model/editor.py,sha256=8UaNi_Ui5ooPdio9_mw4ECihTxE7Iq5n5hDl41D3hu0,12675
 pygpt_net/controller/notepad/__init__.py,sha256=wgnvLtTBuq6W6Eio3BgjrESPRNLRRVyqUutpBNb9T7k,9267
 pygpt_net/controller/painter/__init__.py,sha256=1Ekmr2a3irDkSb2wowiPXhW59rfdZOW1tdbxeubph-k,2747
@@ -136,7 +136,7 @@ pygpt_net/core/camera/__init__.py,sha256=K74D_4Q_GN1M66pTH6H2D1em_FlWohGLgb0aDQ6
 pygpt_net/core/chain/__init__.py,sha256=C7Xm88bRblcyM4e0wZMFG-6SQCdw_frXN9kqnWzce60,3541
 pygpt_net/core/chain/chat.py,sha256=5LxPWHkocjrIAAwrdDH1ss6knAnh4_owfbHPsOQYSws,5238
 pygpt_net/core/chain/completion.py,sha256=GGRA-q6sQgPnSibiwHBwk7jgT0MgOkka1_jK2-IiBPg,5698
-pygpt_net/core/command/__init__.py,sha256=
+pygpt_net/core/command/__init__.py,sha256=0in4peAOe2cH3B0CA4Ni_LhEU3AfKaSG9iMf1d0z0Kk,24303
 pygpt_net/core/ctx/__init__.py,sha256=5W5ZPYE5rkbBRRdL7mT9MFvnWZKjib0cMBiC-9HFl28,43311
 pygpt_net/core/ctx/bag.py,sha256=-LRhttDRiQkw1Msl3kbGQYaY9w8zqn1o0miNRdqjHtQ,1286
 pygpt_net/core/ctx/container.py,sha256=tdPHPRfTi8yGY1MZGgFtYtx2lvc5K9OTqhjde16wivY,4232
@@ -196,16 +196,19 @@ pygpt_net/core/info/__init__.py,sha256=YJEDJnGVMmMp0sQ0tEDyri6Kr94CopcZF6L97w9dX
 pygpt_net/core/installer/__init__.py,sha256=I7ALQy8P3SG7iOY04gDQpRVmSFNCtk83sz90-ER9t9Q,2022
 pygpt_net/core/llm/__init__.py,sha256=cns_L7QeKXwq22Jj09gOG5PPnX0PxB3dagcdiXZvBFI,1291
 pygpt_net/core/locale/__init__.py,sha256=KcG4lwtiI7mqtS8ojX2A2IuO0kCYGQP0-bwuBqzx_mc,5484
-pygpt_net/core/models/__init__.py,sha256=
+pygpt_net/core/models/__init__.py,sha256=W4UaqC8KmlgesV151OGHOLbUDf2GPGNs7SWe7mCL2Fo,10043
+pygpt_net/core/models/ollama.py,sha256=RGE1mVPpNIRuCqGIvTQEqEQDirwj9lUy0R5rGG229LM,1388
 pygpt_net/core/modes/__init__.py,sha256=d3Wju5zo8DaUxPINPOAkOaKy0uuL9DryVubB4xiEqAU,3110
 pygpt_net/core/notepad/__init__.py,sha256=lsgn4zXapg51227oTO3fr93FyltuEMEyJCNSAOki61o,4246
 pygpt_net/core/platforms/__init__.py,sha256=QygvsQadTpWW1K4_GraO38r7u82sYpgz3FI4iv_Dodw,4563
 pygpt_net/core/plugins/__init__.py,sha256=gHrT61w1kVhcdTO0QTaitol5CV8h2bnrQaCG9eNVi-g,15106
 pygpt_net/core/presets/__init__.py,sha256=fNUwFk5mNxBTf340uixDAPD-g5fZHBjZMdKAzIjAFb4,14747
 pygpt_net/core/profile/__init__.py,sha256=ZpXKTwbViskCUDBn8JvxSk7wRFdfrwMfs7Gb3O7N_48,7840
-pygpt_net/core/prompt/__init__.py,sha256=
+pygpt_net/core/prompt/__init__.py,sha256=GpwM_rDOqAINMr8VG_9QdsvLsnEjTMvrJMMGa92Jzxg,7014
+pygpt_net/core/prompt/base/__init__.py,sha256=NmejVxOy0iJTZ7UHxn8TzSF75U_MxFRO9fPAciVXrxc,1897
+pygpt_net/core/prompt/base/gpt.py,sha256=IzjLOVd1CEswDUq4r4uo0UfDeiWovXUaU4OmfY_BR1g,1284
 pygpt_net/core/prompt/custom.py,sha256=kexQrazSm_pCmHclTkVT2YId3aNiF53kg6UCSCFZ-KE,7849
-pygpt_net/core/prompt/template.py,sha256=
+pygpt_net/core/prompt/template.py,sha256=Uygs2W-Bw53SX6gZXjYKuHlPtbFGZf41VzR8244dfc0,3329
 pygpt_net/core/render/__init__.py,sha256=19xPDIYeoDn3Sf1tpcvXtxLaaKkjs0nDQ7-4GqTfeRk,489
 pygpt_net/core/render/base.py,sha256=s3XtAhymSmzGhyCGr2TA_8LDRZ-Uqk4QKEAOgx_ks4w,8735
 pygpt_net/core/render/markdown/__init__.py,sha256=19xPDIYeoDn3Sf1tpcvXtxLaaKkjs0nDQ7-4GqTfeRk,489
@@ -247,9 +250,9 @@ pygpt_net/css_rc.py,sha256=i13kX7irhbYCWZ5yJbcMmnkFp_UfS4PYnvRFSPF7XXo,11349
 pygpt_net/data/audio/click_off.mp3,sha256=aNiRDP1pt-Jy7ija4YKCNFBwvGWbzU460F4pZWZDS90,65201
 pygpt_net/data/audio/click_on.mp3,sha256=qfdsSnthAEHVXzeyN4LlC0OvXuyW8p7stb7VXtlvZ1k,65201
 pygpt_net/data/audio/ok.mp3,sha256=LTiV32pEBkpUGBkKkcOdOFB7Eyt_QoP2Nv6c5AaXftk,32256
-pygpt_net/data/config/config.json,sha256=
-pygpt_net/data/config/models.json,sha256=
-pygpt_net/data/config/modes.json,sha256=
+pygpt_net/data/config/config.json,sha256=u8yrVPHqkMmRpnIeCj_R4AJaM1nxVMAmDBHZ9XEU6HQ,19884
+pygpt_net/data/config/models.json,sha256=nVHDsHLAP3VCbcdLN0LCHyHog8SAeUJMSW7oKJqhaWI,79220
+pygpt_net/data/config/modes.json,sha256=x7Pzn8NnrPHxclXnH8z9QtAreOMCFurYzMj8JZOaXTQ,1921
 pygpt_net/data/config/presets/agent_openai.json,sha256=vMTR-soRBiEZrpJJHuFLWyx8a3Ez_BqtqjyXgxCAM_Q,733
 pygpt_net/data/config/presets/agent_openai_assistant.json,sha256=awJw9lNTGpKML6SJUShVn7lv8AXh0oic7wBeyoN7AYs,798
 pygpt_net/data/config/presets/agent_planner.json,sha256=a6Rv58Bnm2STNWB0Rw_dGhnsz6Lb3J8_GwsUVZaTIXc,742
@@ -1485,14 +1488,14 @@ pygpt_net/data/js/katex/fonts/KaTeX_Typewriter-Regular.woff,sha256=4U_tArGrp86fW
 pygpt_net/data/js/katex/fonts/KaTeX_Typewriter-Regular.woff2,sha256=cdUX1ngneHz6vfGGkUzDNY7aU543kxlB8rL9SiH2jAs,13568
 pygpt_net/data/js/katex/katex.min.css,sha256=lVaKnUaQNG4pI71WHffQZVALLQF4LMZEk4nOia8U9ow,23532
 pygpt_net/data/js/katex/katex.min.js,sha256=KLASOtKS2x8pUxWVzCDmlWJ4jhuLb0vtrgakbD6gDDo,276757
-pygpt_net/data/locale/locale.de.ini,sha256=
-pygpt_net/data/locale/locale.en.ini,sha256=
-pygpt_net/data/locale/locale.es.ini,sha256=
-pygpt_net/data/locale/locale.fr.ini,sha256=
-pygpt_net/data/locale/locale.it.ini,sha256=
-pygpt_net/data/locale/locale.pl.ini,sha256=
-pygpt_net/data/locale/locale.uk.ini,sha256=
-pygpt_net/data/locale/locale.zh.ini,sha256=
+pygpt_net/data/locale/locale.de.ini,sha256=t4zR4P9gC7qxw8AfHjk4lV4mQGzzjVQN74mBrAxISP8,64241
+pygpt_net/data/locale/locale.en.ini,sha256=womiO7pcyNd7sAojhO2SQswNLeBCxf7d8qX8uq4xHNw,76867
+pygpt_net/data/locale/locale.es.ini,sha256=SMR7lgEpGOK1MAWJFvwdTICRZR2Zz9GCAxmIn6lHQ0U,64461
+pygpt_net/data/locale/locale.fr.ini,sha256=zSyjr4HGgJt8ooKv9UBkVag2tOeg2ZbRE-J4cKRNxaI,66483
+pygpt_net/data/locale/locale.it.ini,sha256=3WbsgyhElEuTUd74HRLIT5SFN06xLsCCO9My2hholUo,63215
+pygpt_net/data/locale/locale.pl.ini,sha256=wjmVHr72v-R-ZJIadEbCCy3Eb1mWRb8WZXQrUgeXh48,63310
+pygpt_net/data/locale/locale.uk.ini,sha256=HfHqZt31oTpoOBajAsxySBY2g5DT-5s9q35TVD8_K0s,88193
+pygpt_net/data/locale/locale.zh.ini,sha256=yFOgI_tF0Fd2P8f2SIXIwdMfkGkOmT1WgD_i6Nz1Cwc,64928
 pygpt_net/data/locale/plugin.agent.de.ini,sha256=BY28KpfFvgfVYJzcw2o5ScWnR4uuErIYGyc3NVHlmTw,1714
 pygpt_net/data/locale/plugin.agent.en.ini,sha256=88LkZUpilbV9l4QDbMyIdq_K9sbWt-CQPpavEttPjJU,1489
 pygpt_net/data/locale/plugin.agent.es.ini,sha256=bqaJQne8HPKFVtZ8Ukzo1TSqVW41yhYbGUqW3j2x1p8,1680
@@ -1763,7 +1766,7 @@ pygpt_net/plugin/mailer/__init__.py,sha256=0mj7f43iAIQpywuQrt9WiR9fiupAEodQwu19K
 pygpt_net/plugin/mailer/config.py,sha256=Sc5ezdsOevx50v3XHRZdf_dgw1x4wUhRY0z2vcfg7Dc,4828
 pygpt_net/plugin/mailer/runner.py,sha256=xM-a6XWvJ8JwOzS2JRugBfxPj0CHVL7A8ZAmzC-keQM,9775
 pygpt_net/plugin/mailer/worker.py,sha256=TL7f6dB7BLWlN0xvI7i5Cac5ii599aD3uuKXRUbCz_c,3693
-pygpt_net/plugin/openai_dalle/__init__.py,sha256=
+pygpt_net/plugin/openai_dalle/__init__.py,sha256=b3eghCthPtqTEpgwV-yNqi4J52sZrEc5zD84-PT6hMg,5182
 pygpt_net/plugin/openai_dalle/config.py,sha256=yBsd_EvPlWJY0HhK2cgz0InsaYf4IK6eeww_OkrOHGA,4540
 pygpt_net/plugin/openai_vision/__init__.py,sha256=Hi_n9iMFq2hS4YaVmkYL32r1KiAgib7DYLxMCYn7Ne8,10151
 pygpt_net/plugin/openai_vision/config.py,sha256=8yP4znFYGA_14pexNT3T6mddNsf-wsRqXc5QT52N4Uc,4627
@@ -1847,7 +1850,7 @@ pygpt_net/provider/core/mode/patch.py,sha256=VS2KCYW05jxLd-lcStNY1k4fHKUUrVVLTdR
 pygpt_net/provider/core/model/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
 pygpt_net/provider/core/model/base.py,sha256=L1x2rHha8a8hnCUYxZr88utay1EWEx5qBXW_2acpAN0,1319
 pygpt_net/provider/core/model/json_file.py,sha256=g0u1tbOm7QKutjD5mZLRwzmYmoIqA8b6bDQ6wKbucYM,6484
-pygpt_net/provider/core/model/patch.py,sha256=
+pygpt_net/provider/core/model/patch.py,sha256=u7K6-et8bAeBrhGXgmESC7FiM8JpeBhFpaO6TOGs8Vg,23248
 pygpt_net/provider/core/notepad/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
 pygpt_net/provider/core/notepad/base.py,sha256=7aPhild8cALTaN3JEbI0YrkIW1DRIycGQWTfsdH6WcQ,1323
 pygpt_net/provider/core/notepad/db_sqlite/__init__.py,sha256=DQnVKJxvLq-6zlRlLk3MXSQZEObFtcQ5p5mEnuRzwYE,3104
@@ -2177,8 +2180,8 @@ pygpt_net/ui/widget/textarea/web.py,sha256=2LebPHa_e5lvBqnIVzjwsLcFMoc11BonXgAUs
 pygpt_net/ui/widget/vision/__init__.py,sha256=8HT4tQFqQogEEpGYTv2RplKBthlsFKcl5egnv4lzzEw,488
 pygpt_net/ui/widget/vision/camera.py,sha256=T8b5cmK6uhf_WSSxzPt_Qod8JgMnst6q8sQqRvgQiSA,2584
 pygpt_net/utils.py,sha256=WtrdagJ-BlCjxGEEVq2rhsyAZMcU6JqltCXzOs823po,6707
-pygpt_net-2.5.
-pygpt_net-2.5.
-pygpt_net-2.5.
-pygpt_net-2.5.
-pygpt_net-2.5.
+pygpt_net-2.5.2.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
+pygpt_net-2.5.2.dist-info/METADATA,sha256=20sQekIH1PJlVXxd6yeL_oD4TFCYrv9YQA8SiQOiqtE,168641
+pygpt_net-2.5.2.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+pygpt_net-2.5.2.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
+pygpt_net-2.5.2.dist-info/RECORD,,
{pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/LICENSE
File without changes
{pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/WHEEL
File without changes
{pygpt_net-2.5.1.dist-info → pygpt_net-2.5.2.dist-info}/entry_points.txt
File without changes