pygpt-net 2.4.47__py3-none-any.whl → 2.4.49__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +13 -0
- README.md +15 -2
- pygpt_net/CHANGELOG.txt +13 -0
- pygpt_net/__init__.py +7 -3
- pygpt_net/controller/assistant/threads.py +6 -1
- pygpt_net/controller/dialogs/info.py +18 -1
- pygpt_net/controller/lang/mapping.py +3 -3
- pygpt_net/core/agents/tools.py +3 -3
- pygpt_net/core/ctx/__init__.py +7 -0
- pygpt_net/core/idx/__init__.py +16 -11
- pygpt_net/core/idx/chat.py +18 -22
- pygpt_net/core/idx/indexing.py +14 -12
- pygpt_net/core/idx/llm.py +3 -11
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/modes.json +3 -3
- pygpt_net/data/locale/locale.de.ini +2 -1
- pygpt_net/data/locale/locale.en.ini +4 -3
- pygpt_net/data/locale/locale.es.ini +2 -1
- pygpt_net/data/locale/locale.fr.ini +2 -1
- pygpt_net/data/locale/locale.it.ini +2 -1
- pygpt_net/data/locale/locale.pl.ini +3 -2
- pygpt_net/data/locale/locale.uk.ini +2 -1
- pygpt_net/data/locale/locale.zh.ini +2 -1
- pygpt_net/provider/llms/google.py +25 -2
- pygpt_net/provider/llms/openai.py +1 -1
- pygpt_net/provider/loaders/hub/github/issues.py +5 -5
- pygpt_net/provider/loaders/hub/json/base.py +2 -2
- pygpt_net/provider/loaders/hub/pandas_excel/base.py +2 -2
- pygpt_net/provider/loaders/hub/simple_csv/base.py +2 -2
- pygpt_net/provider/loaders/hub/yt/base.py +1 -1
- pygpt_net/provider/vector_stores/__init__.py +19 -11
- pygpt_net/provider/vector_stores/base.py +11 -7
- pygpt_net/provider/vector_stores/chroma.py +11 -5
- pygpt_net/provider/vector_stores/ctx_attachment.py +7 -5
- pygpt_net/provider/vector_stores/elasticsearch.py +11 -5
- pygpt_net/provider/vector_stores/pinecode.py +11 -5
- pygpt_net/provider/vector_stores/redis.py +11 -5
- pygpt_net/provider/vector_stores/simple.py +7 -5
- pygpt_net/provider/vector_stores/temp.py +7 -5
- pygpt_net/ui/layout/chat/output.py +1 -2
- pygpt_net/ui/menu/__init__.py +4 -1
- pygpt_net/ui/menu/about.py +6 -7
- pygpt_net/ui/menu/donate.py +46 -0
- pygpt_net/ui/widget/anims/toggles.py +1 -1
- pygpt_net/utils.py +20 -7
- {pygpt_net-2.4.47.dist-info → pygpt_net-2.4.49.dist-info}/METADATA +48 -33
- {pygpt_net-2.4.47.dist-info → pygpt_net-2.4.49.dist-info}/RECORD +51 -50
- {pygpt_net-2.4.47.dist-info → pygpt_net-2.4.49.dist-info}/LICENSE +0 -0
- {pygpt_net-2.4.47.dist-info → pygpt_net-2.4.49.dist-info}/WHEEL +0 -0
- {pygpt_net-2.4.47.dist-info → pygpt_net-2.4.49.dist-info}/entry_points.txt +0 -0
CHANGELOG.md
CHANGED
@@ -1,5 +1,18 @@
 # CHANGELOG
 
+## 2.4.49 (2025-01-16)
+
+- Fix: stream render in Assistants mode.
+- Fix: items remove in context regen/edit.
+
+## 2.4.48 (2025-01-16)
+
+- Fix: parsing lists in data loaders configuration.
+- Fix: crash on Windows on PySide6 v6.6.0.
+- Added Gemini embeddings to LlamaIndex settings.
+- LlamaIndex upgraded to 0.12.11.
+- Security updates.
+
 ## 2.4.47 (2025-01-14)
 
 - Added support for Python 3.12.
README.md
CHANGED
@@ -2,7 +2,7 @@
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.4.47** | build: **2025.01.14** | Python: **>=3.10, <3.13**
+Release: **2.4.49** | build: **2025.01.16** | Python: **>=3.10, <3.13**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -10,7 +10,7 @@ Release: **2.4.47** | build: **2025.01.14** | Python: **>=3.10, <3.13**
 >
 > Compiled version for Linux (`zip`) and Windows 10/11 (`msi`) 64-bit: https://pygpt.net/#download
 >
-> ❤️ Donate: https://www.buymeacoffee.com/szczyglis
+> ❤️ Donate: https://www.buymeacoffee.com/szczyglis | https://github.com/sponsors/szczyglis-dev
 
 ## Overview
 
@@ -3952,6 +3952,19 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.4.49 (2025-01-16)**
+
+- Fix: stream render in Assistants mode.
+- Fix: items remove in context regen/edit.
+
+**2.4.48 (2025-01-16)**
+
+- Fix: parsing lists in data loaders configuration.
+- Fix: crash on Windows on PySide6 v6.6.0.
+- Added Gemini embeddings to LlamaIndex settings.
+- LlamaIndex upgraded to 0.12.11.
+- Security updates.
+
 **2.4.47 (2025-01-14)**
 
 - Added support for Python 3.12.
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,16 @@
+2.4.49 (2025-01-16)
+
+- Fix: stream render in Assistants mode.
+- Fix: items remove in context regen/edit.
+
+2.4.48 (2025-01-16)
+
+- Fix: parsing lists in data loaders configuration.
+- Fix: crash on Windows on PySide6 v6.6.0.
+- Added Gemini embeddings to LlamaIndex settings.
+- LlamaIndex upgraded to 0.12.11.
+- Security updates.
+
 2.4.47 (2025-01-14)
 
 - Added support for Python 3.12.
pygpt_net/__init__.py
CHANGED
@@ -6,17 +6,18 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.01.
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.4.47"
-__build__ = "2025.01.14"
+__version__ = "2.4.49"
+__build__ = "2025.01.16"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
+__report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
 __website__ = "https://pygpt.net"
 __pypi__ = "https://pypi.org/project/pygpt-net"
 __snap__ = "https://snapcraft.io/pygpt"
@@ -24,3 +25,6 @@ __donate__ = "https://pygpt.net/#donate"
 __documentation__ = "https://pygpt.readthedocs.io"
 __discord__ = "https://pygpt.net/discord"
 __email__ = "info@pygpt.net"
+__donate_coffee__ = "https://pygpt.net/donate/buymeacoffee"
+__donate_paypal__ = "https://pygpt.net/donate/paypal"
+__donate_github__ = "https://github.com/sponsors/szczyglis-dev"
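Note: the new __report__ and __donate_*__ entries are plain module-level string constants, read the same way as the existing metadata. A minimal illustrative sketch (not part of the package itself):

# Minimal sketch: reading the metadata constants added in 2.4.49.
import pygpt_net

print(pygpt_net.__version__)         # "2.4.49"
print(pygpt_net.__report__)          # issue-tracker URL (new in this release)
print(pygpt_net.__donate_github__)   # GitHub Sponsors URL (new in this release)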
pygpt_net/controller/assistant/threads.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 04:00:00 #
 # ================================================== #
 
 import json
@@ -442,6 +442,11 @@ class Threads(QObject):
             'msg': trans('assistant.run.completed'),
         })
         self.window.dispatch(event)
+        event = RenderEvent(RenderEvent.RELOAD, {
+            "meta": ctx.meta,
+            "ctx": ctx,
+        })
+        self.window.dispatch(event)
         self.window.controller.chat.common.show_response_tokens(ctx)  # update tokens
 
     def handle_status_error(self, ctx: CtxItem):
pygpt_net/controller/dialogs/info.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 import webbrowser
@@ -93,6 +93,23 @@ class Info:
         """Open discord page"""
         webbrowser.open(self.window.meta['discord'])
 
+    def goto_report(self):
+        """Open report a bug page"""
+        webbrowser.open(self.window.meta['report'])
+
+    def donate(self, id: str):
+        """
+        Donate action
+
+        :param id: donate id
+        """
+        if id == 'coffee':
+            webbrowser.open(self.window.meta['donate_coffee'])
+        elif id == 'paypal':
+            webbrowser.open(self.window.meta['donate_paypal'])
+        elif id == 'github':
+            webbrowser.open(self.window.meta['donate_github'])
+
     def update_menu(self):
         """Update info menu"""
         for id in self.ids:
pygpt_net/controller/lang/mapping.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 from typing import Dict
@@ -364,6 +364,7 @@ class Mapping:
         menu_title['menu.audio'] = 'menu.audio'
         menu_title['menu.video'] = 'menu.video'
         menu_title['menu.tools'] = 'menu.tools'
+        menu_title['menu.donate'] = 'menu.info.donate'
 
         # menu text
         menu_text = {}
@@ -393,9 +394,9 @@ class Mapping:
         menu_text['info.docs'] = 'menu.info.docs'
         menu_text['info.pypi'] = 'menu.info.pypi'
         menu_text['info.snap'] = 'menu.info.snap'
-        menu_text['info.donate'] = 'menu.info.donate'
         menu_text['info.website'] = 'menu.info.website'
         menu_text['info.github'] = 'menu.info.github'
+        menu_text['info.report'] = 'menu.info.report'
         menu_text['audio.output'] = 'menu.audio.output'
         menu_text['audio.input'] = 'menu.audio.input'
         menu_text['audio.control.plugin'] = 'menu.audio.control.plugin'
@@ -453,7 +454,6 @@ class Mapping:
         tooltips['icon.audio.output'] = 'icon.audio.output'
         tooltips['icon.audio.input'] = 'icon.audio.input'
         tooltips['assistant.store.btn.refresh_status'] = 'dialog.assistant.store.btn.refresh_status'
-        tooltips['inline.vision'] = 'vision.checkbox.tooltip'
         tooltips['agent.llama.loop.score'] = 'toolbox.agent.llama.loop.score.tooltip'
 
         # menu tooltips
pygpt_net/core/agents/tools.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 import json
@@ -54,8 +54,8 @@ class Tools:
         # add query engine tool if idx is provided
         idx = extra.get("agent_idx", None)
         if idx is not None and idx != "_":
-            
-            index = self.window.core.idx.storage.get(idx, 
+            llm, embed_model = self.window.core.idx.llm.get_service_context(model=context.model)
+            index = self.window.core.idx.storage.get(idx, llm, embed_model)  # get index
             if index is not None:
                 query_engine = index.as_query_engine(similarity_top_k=3)
                 query_engine_tools = [
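For context, the hunk above wraps the retrieved index in a query engine and exposes it to the agent as a tool. A hedged sketch of that pattern with the current LlamaIndex API; the tool name and description below are illustrative placeholders, not the project's actual values:

# Hedged sketch: exposing an index-backed query engine as an agent tool.
# `index` is an already-built llama_index index object.
from llama_index.core.tools import QueryEngineTool, ToolMetadata

def make_query_tool(index):
    query_engine = index.as_query_engine(similarity_top_k=3)  # as in the hunk above
    return QueryEngineTool(
        query_engine=query_engine,
        metadata=ToolMetadata(
            name="query_index",                       # placeholder name
            description="Query the attached index.",  # placeholder description
        ),
    )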
pygpt_net/core/ctx/__init__.py
CHANGED
@@ -856,6 +856,13 @@ class Ctx:
         :param meta_id: meta_id
         :param item_id: item_id
         """
+        items = self.get_items()
+        remove = False
+        for item in items:
+            if item.id == item_id:
+                remove = True
+            if remove:
+                items.remove(item)
         return self.provider.remove_items_from(meta_id, item_id)
 
     def truncate(self):
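The added block drops the matched item and everything after it from the cached list before delegating the delete to the provider; this is the "items remove in context regen/edit" fix from the changelog. A copy-based sketch of the same idea, written without removing from the list while iterating (names are illustrative, not pygpt-net's actual helper):

# Hedged sketch: keep only the items that precede the edited/regenerated one.
def items_before(items: list, item_id) -> list:
    kept = []
    for item in items:
        if item.id == item_id:
            break  # drop this item and everything after it
        kept.append(item)
    return kept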
pygpt_net/core/idx/__init__.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 import datetime
@@ -146,10 +146,11 @@ class Idx:
         :param recursive: recursive indexing
         :return: dict with indexed files (path -> id), list with errors
         """
-        
+        llm, embed_model = self.llm.get_service_context()
         index = self.storage.get(
             id=idx,
-            
+            llm=llm,
+            embed_model=embed_model,
         )  # get or create index
         files, errors = self.indexing.index_files(
             idx=idx,
@@ -182,10 +183,11 @@ class Idx:
         :param from_ts: timestamp from
         :return: num of indexed files, list with errors
         """
-        
+        llm, embed_model = self.llm.get_service_context()
         index = self.storage.get(
             id=idx,
-            
+            llm=llm,
+            embed_model=embed_model,
         )  # get or create index
         num, errors = self.indexing.index_db_by_meta_id(
             idx=idx,
@@ -215,10 +217,11 @@ class Idx:
         :param from_ts: timestamp from
         :return: num of indexed files, list with errors
         """
-        
+        llm, embed_model = self.llm.get_service_context()
         index = self.storage.get(
             id=idx,
-            
+            llm=llm,
+            embed_model=embed_model,
         )  # get or create index
         num, errors = self.indexing.index_db_from_updated_ts(
             idx=idx,
@@ -251,10 +254,11 @@ class Idx:
         :param extra_args: extra args
         :return: num of indexed, list with errors
         """
-        
+        llm, embed_model = self.llm.get_service_context()
         index = self.storage.get(
             id=idx,
-            
+            llm=llm,
+            embed_model=embed_model,
         )  # get or create index
         n, errors = self.indexing.index_urls(
             idx=idx,
@@ -294,10 +298,11 @@ class Idx:
         # update config params
         self.indexing.update_loader_args(type, config)
 
-        
+        llm, embed_model = self.llm.get_service_context()
         index = self.storage.get(
             id=idx,
-            
+            llm=llm,
+            embed_model=embed_model,
         )  # get or create index
         n, errors = self.indexing.index_url(
             idx=idx,
pygpt_net/core/idx/chat.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 import json
@@ -126,8 +126,7 @@ class Chat:
             model.id,
         ))
 
-        index, 
-        llm = service_context.llm
+        index, llm = self.get_index(idx, model)
         input_tokens = self.window.core.tokens.from_llama_messages(
             query,
             [],
@@ -192,7 +191,7 @@ class Chat:
             query,
         ))
 
-        index, 
+        index, llm = self.get_index(idx, model)
        retriever = index.as_retriever()
        nodes = retriever.retrieve(query)
        outputs = []
@@ -251,8 +250,7 @@ class Chat:
         # use index only if idx is not empty, otherwise use only LLM
         index = None
         if use_index:
-            index, 
-            llm = service_context.llm  # no multimodal LLM in service context
+            index, llm = self.get_index(idx, model)
         else:
             llm = self.window.core.idx.llm.get(model)
 
@@ -382,9 +380,8 @@ class Chat:
         if model is None:
             model = self.window.core.models.from_defaults()
 
-        
-        
-        tmp_id, index = self.storage.get_tmp(path, service_context=service_context)  # get or create tmp index
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
+        tmp_id, index = self.storage.get_tmp(path, llm, embed_model)  # get or create tmp index
 
         idx = "tmp:{}".format(path)  # tmp index id
         self.log("Indexing to temporary in-memory index: {}...".format(idx))
@@ -443,9 +440,8 @@ class Chat:
         id = json.dumps(parts)
         if model is None:
             model = self.window.core.models.from_defaults()
-        
-        tmp_id, index = self.storage.get_tmp(id, 
-        llm = context.llm
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
+        tmp_id, index = self.storage.get_tmp(id, llm, embed_model)  # get or create tmp index
 
         idx = "tmp:{}".format(id)  # tmp index id
         self.log("Indexing to temporary in-memory index: {}...".format(idx))
@@ -498,8 +494,8 @@ class Chat:
         """
         if model is None:
             model = self.window.core.models.from_defaults()
-        
-        index = self.storage.get_ctx_idx(path, 
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
+        index = self.storage.get_ctx_idx(path, llm, embed_model)
 
         # 1. try to retrieve directly from index
         retriever = index.as_retriever()
@@ -524,9 +520,9 @@ class Chat:
             "",
             history,
         )
-        memory = self.get_memory_buffer(history, 
+        memory = self.get_memory_buffer(history, llm)
         response = index.as_chat_engine(
-            llm=
+            llm=llm,
             streaming=False,
             memory=memory,
         ).chat(query)
@@ -550,7 +546,7 @@ class Chat:
         """
         if model is None:
             model = self.window.core.models.from_defaults()
-        index, 
+        index, llm = self.get_index(idx, model)
         retriever = index.as_retriever()
         nodes = retriever.retrieve(query)
         response = ""
@@ -628,14 +624,14 @@ class Chat:
         if not self.storage.exists(idx):
             if idx is None:
                 # create empty in memory idx
-                
+                llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
                 index = self.storage.index_from_empty()
-                return index, 
+                return index, llm
             # raise Exception("Index not prepared")
 
-        
-        index = self.storage.get(idx, 
-        return index, 
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
+        index = self.storage.get(idx, llm, embed_model)  # get index
+        return index, llm
 
     def get_metadata(
         self,
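Several hunks above stop pulling the LLM out of a ServiceContext and instead receive it from get_index() and hand it explicitly to the chat engine together with a memory buffer. A hedged sketch of that call pattern against the LlamaIndex 0.12.x API; `index`, `llm` and `chat_history` are assumed to exist, and this is not pygpt-net's actual helper:

# Hedged sketch: chat against an index with an explicit LLM and a memory
# buffer built from prior turns (mirrors the as_chat_engine(...) hunk above).
from llama_index.core.memory import ChatMemoryBuffer

def chat_with_index(index, llm, chat_history, query: str):
    memory = ChatMemoryBuffer.from_defaults(chat_history=chat_history, llm=llm)
    engine = index.as_chat_engine(llm=llm, streaming=False, memory=memory)
    return engine.chat(query)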
pygpt_net/core/idx/indexing.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 import datetime
@@ -446,9 +446,9 @@ class Indexing:
         :param doc: Document
         """
         # fix empty date in Pinecode
-        if "last_accessed_date" in doc.
-        if "creation_date" in doc.
-        doc.
+        if "last_accessed_date" in doc.metadata and doc.metadata["last_accessed_date"] is None:
+            if "creation_date" in doc.metadata:
+                doc.metadata["last_accessed_date"] = doc.metadata["creation_date"]
 
     def index_files(
         self,
@@ -1046,12 +1046,14 @@ class Indexing:
         :param doc: document
         """
         self.apply_rate_limit()  # apply RPM limit
+        """
         try:
             # display embedding model info
-            if index.
+            if index._embed_model is not None:
                 self.window.core.idx.log("Embedding model: {}".format(index.service_context.embed_model.model_name))
         except Exception as e:
             self.window.core.debug.log(e)
+        """
         index.insert(document=doc)
 
     def index_attachment(
@@ -1073,10 +1075,11 @@ class Indexing:
         if model is None:
             model = self.window.core.models.from_defaults()
 
-        
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
         index = self.window.core.idx.storage.get_ctx_idx(
             index_path,
-            
+            llm=llm,
+            embed_model=embed_model,
         )  # get or create ctx index
 
         idx = "tmp:{}".format(index_path)  # tmp index id
@@ -1114,8 +1117,8 @@ class Indexing:
         if model is None:
             model = self.window.core.models.from_defaults()
 
-        
-        index = self.window.core.idx.storage.get_ctx_idx(index_path, 
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
+        index = self.window.core.idx.storage.get_ctx_idx(index_path, llm, embed_model)  # get or create ctx index
 
         idx = "tmp:{}".format(index_path)  # tmp index id
         self.window.core.idx.log("Indexing to context attachment index: {}...".format(idx))
@@ -1168,9 +1171,8 @@ class Indexing:
         :return: True if success
         """
         model = self.window.core.models.from_defaults()
-        
-        index = self.window.core.idx.storage.get_ctx_idx(index_path, 
-            service_context=service_context)  # get or create ctx index
+        llm, embed_model = self.window.core.idx.llm.get_service_context(model=model)
+        index = self.window.core.idx.storage.get_ctx_idx(index_path, llm, embed_model)  # get or create ctx index
         index.delete_ref_doc(doc_id)
         self.window.core.idx.storage.store_ctx_idx(index_path, index)
         return True
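The first hunk's date fix is self-contained: when a document's last_accessed_date metadata is present but None, it is backfilled from creation_date before the document is inserted (the in-code comment ties this to the Pinecone vector store). A standalone sketch of that normalization, assuming `doc` exposes a dict-like `metadata` attribute as a llama_index Document does:

# Hedged sketch of the metadata normalization shown in the first hunk above.
def fix_empty_dates(doc) -> None:
    meta = doc.metadata  # dict-like metadata on the document
    if "last_accessed_date" in meta and meta["last_accessed_date"] is None:
        if "creation_date" in meta:
            meta["last_accessed_date"] = meta["creation_date"]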
pygpt_net/core/idx/llm.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 
+# Updated Date: 2025.01.16 01:00:00 #
 # ================================================== #
 
 import os.path
@@ -15,7 +15,6 @@ from typing import Optional, Union
 from llama_index.core.llms.llm import BaseLLM
 from llama_index.core.multi_modal_llms import MultiModalLLM
 from llama_index.core.base.embeddings.base import BaseEmbedding
-from llama_index.core.indices.service_context import ServiceContext
 from llama_index.llms.openai import OpenAI
 
 from pygpt_net.core.types import (
@@ -125,7 +124,7 @@ class Llm:
     def get_service_context(
             self,
             model: Optional[ModelItem] = None
-    )
+    ):
         """
         Get service context + embeddings provider
 
@@ -134,11 +133,4 @@ class Llm:
         """
         llm = self.get(model=model)
         embed_model = self.get_embeddings_provider()
-        
-        kwargs = {}
-        if llm is not None:
-            kwargs['llm'] = llm
-        if embed_model is not None:
-            kwargs['embed_model'] = embed_model
-        
-        return ServiceContext.from_defaults(**kwargs)
+        return llm, embed_model
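This is the core of the LlamaIndex 0.12.11 migration: ServiceContext no longer exists, so get_service_context() now returns a plain (llm, embed_model) pair and every call site in this diff passes the two objects explicitly. A hedged sketch of how such a pair is typically consumed with the current LlamaIndex API; illustrative only, not the storage provider's actual code:

# Hedged sketch: with ServiceContext removed in LlamaIndex 0.12.x, the
# embedding model is passed at index build time and the LLM at query time.
from llama_index.core import Document, VectorStoreIndex

def build_query_engine(texts, llm, embed_model):
    documents = [Document(text=t) for t in texts]
    index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
    return index.as_query_engine(llm=llm)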
pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.4.47",
-        "app.version": "2.4.47",
-        "updated_at": "2025-01-
+        "version": "2.4.49",
+        "app.version": "2.4.49",
+        "updated_at": "2025-01-16T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
pygpt_net/data/config/modes.json
CHANGED
pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -596,10 +596,11 @@ menu.info = Über
 menu.info.about = Über
 menu.info.changelog = Änderungsprotokoll
 menu.info.docs = Dokumentation
-menu.info.donate = Unterstützen
+menu.info.donate = Unterstützen PyGPT
 menu.info.github = GitHub-Projektseite
 menu.info.license = Lizenz
 menu.info.pypi = PyPi
+menu.info.report = Einen Fehler melden
 menu.info.snap = Snap Store
 menu.info.updates = Nach Updates suchen...
 menu.info.website = Offizielle Webseite - pygpt.net
pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -730,10 +730,11 @@ menu.info.about = About
 menu.info.changelog = Changelog
 menu.info.discord = Discord
 menu.info.docs = Documentation
-menu.info.donate = Donate
-menu.info.github = GitHub
+menu.info.donate = Donate PyGPT
+menu.info.github = GitHub
 menu.info.license = License
 menu.info.pypi = PyPi
+menu.info.report = Report a bug
 menu.info.snap = Snap Store
 menu.info.updates = Check for Updates...
 menu.info.website = Project Website - pygpt.net
@@ -1216,8 +1217,8 @@ toolbox.indexes.label = Index
 toolbox.llama_index.label = LlamaIndex
 toolbox.llama_index.mode.chat = Chat
 toolbox.llama_index.mode.label = Mode
-toolbox.llama_index.mode.retrieval = Retrieve Only
 toolbox.llama_index.mode.query = Query the Index Only
+toolbox.llama_index.mode.retrieval = Retrieve Only
 toolbox.mode.label = Mode
 toolbox.model.label = Model
 toolbox.name.ai = AI Name
pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -595,10 +595,11 @@ menu.info = Acerca de
 menu.info.about = Acerca de
 menu.info.changelog = Registro de cambios
 menu.info.docs = Documentación
-menu.info.donate = Apoya
+menu.info.donate = Apoya PyGPT
 menu.info.github = Página del proyecto en GitHub
 menu.info.license = Licencia
 menu.info.pypi = PyPi
+menu.info.report = Reportar un error
 menu.info.snap = Tienda Snap
 menu.info.updates = Buscar actualizaciones...
 menu.info.website = Sitio web oficial - pygpt.net
pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -596,10 +596,11 @@ menu.info = À propos
 menu.info.about = À propos
 menu.info.changelog = Historique des changements
 menu.info.docs = Documentation
-menu.info.donate = Soutenez
+menu.info.donate = Soutenez PyGPT
 menu.info.github = Page du projet GitHub
 menu.info.license = Licence
 menu.info.pypi = PyPi
+menu.info.report = Signaler un bug
 menu.info.snap = Snap Store
 menu.info.updates = Vérifier les mises à jour...
 menu.info.website = Site officiel - pygpt.net
pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -596,10 +596,11 @@ menu.info = Informazioni
 menu.info.about = Circa
 menu.info.changelog = Cronologia delle versioni
 menu.info.docs = Documentazione
-menu.info.donate = Supporta
+menu.info.donate = Supporta PyGPT
 menu.info.github = Pagina del progetto su GitHub
 menu.info.license = Licenza
 menu.info.pypi = PyPi
+menu.info.report = Segnala un bug
 menu.info.snap = Snap Store
 menu.info.updates = Verifica aggiornamenti...
 menu.info.website = Sito ufficiale - pygpt.net