pygpt-net 2.5.90__py3-none-any.whl → 2.5.92__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +9 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/access/voice.py +2 -3
- pygpt_net/controller/audio/audio.py +7 -4
- pygpt_net/controller/ctx/extra.py +2 -7
- pygpt_net/controller/kernel/kernel.py +1 -0
- pygpt_net/core/audio/audio.py +83 -1
- pygpt_net/core/audio/backend/native.py +92 -30
- pygpt_net/data/config/config.json +5 -3
- pygpt_net/data/config/models.json +168 -3
- pygpt_net/data/config/settings.json +23 -1
- pygpt_net/data/locale/locale.de.ini +5 -0
- pygpt_net/data/locale/locale.en.ini +5 -0
- pygpt_net/data/locale/locale.es.ini +5 -0
- pygpt_net/data/locale/locale.fr.ini +5 -0
- pygpt_net/data/locale/locale.it.ini +5 -0
- pygpt_net/data/locale/locale.pl.ini +6 -1
- pygpt_net/data/locale/locale.uk.ini +5 -0
- pygpt_net/data/locale/locale.zh.ini +5 -0
- pygpt_net/plugin/audio_output/plugin.py +21 -1
- pygpt_net/provider/audio_output/eleven_labs.py +2 -2
- pygpt_net/provider/audio_output/google_tts.py +2 -2
- pygpt_net/provider/audio_output/ms_azure_tts.py +2 -2
- pygpt_net/provider/audio_output/openai_tts.py +2 -2
- pygpt_net/provider/core/config/json_file.py +10 -2
- pygpt_net/provider/core/config/patch.py +15 -1
- pygpt_net/provider/core/model/patch.py +11 -0
- {pygpt_net-2.5.90.dist-info → pygpt_net-2.5.92.dist-info}/METADATA +22 -6
- {pygpt_net-2.5.90.dist-info → pygpt_net-2.5.92.dist-info}/RECORD +32 -32
- {pygpt_net-2.5.90.dist-info → pygpt_net-2.5.92.dist-info}/LICENSE +0 -0
- {pygpt_net-2.5.90.dist-info → pygpt_net-2.5.92.dist-info}/WHEEL +0 -0
- {pygpt_net-2.5.90.dist-info → pygpt_net-2.5.92.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,12 @@
+2.5.92 (2025-08-08)
+
+- Added max files to store config option in Audio -> Cache.
+
+2.5.91 (2025-08-08)
+
+- Added GPT-5.
+- Added audio cache - #118.
+
 2.5.90 (2025-08-07)
 
 - Fix: Initialize context summary if a conversation starts with a tool call.
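Note: the audio cache added in 2.5.91/2.5.92 keys cached speech files by a SHA-1 hash of the spoken text and prunes the cache to a configurable number of files. The following is a minimal standalone sketch of that scheme, not the package API (the helper names `cache_path_for` and `prune_cache` are illustrative); the actual implementation appears in the `pygpt_net/core/audio/audio.py` hunk further below.

```python
import hashlib
import os

def cache_path_for(text: str, cache_dir: str, fmt: str = "mp3"):
    """Return (path, exists) for the cache file that would hold speech for `text`."""
    os.makedirs(cache_dir, exist_ok=True)
    name = hashlib.sha1(text.encode("utf-8")).hexdigest() + "." + fmt
    path = os.path.join(cache_dir, name)
    return path, os.path.exists(path)

def prune_cache(cache_dir: str, max_files: int = 1000, fmt: str = "mp3") -> None:
    """Keep only the `max_files` most recently modified cache files."""
    files = [os.path.join(cache_dir, f) for f in os.listdir(cache_dir) if f.endswith("." + fmt)]
    files.sort(key=os.path.getmtime, reverse=True)
    for old in files[max_files:]:
        os.remove(old)
```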
pygpt_net/__init__.py
CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.08 00:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.5.
-__build__ = "2025-08-
+__version__ = "2.5.92"
+__build__ = "2025-08-08"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/access/voice.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.07
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 from typing import Optional, List, Dict, Any
@@ -231,8 +231,7 @@ class Voice(QObject):
 self.switch_btn_stop()
 
 # stop audio output if playing
-
-self.window.controller.audio.stop_output()
+self.window.controller.audio.stop_output()
 
 # set audio volume bar
 self.window.core.audio.capture.set_bar(
pygpt_net/controller/audio/audio.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 import os
@@ -291,7 +291,7 @@ class Audio:
 
 if use_cache:
 lang = self.window.core.config.get("lang")
-cache_dir = os.path.join(self.window.core.
+cache_dir = os.path.join(self.window.core.audio.get_cache_dir(), lang)
 if not os.path.exists(cache_dir):
 os.makedirs(cache_dir, exist_ok=True)
 cache_file = os.path.join(str(cache_dir), event.name + ".wav")
@@ -318,7 +318,7 @@ class Audio:
 )
 return
 
-cache_dir =
+cache_dir = self.window.core.audio.get_cache_dir()
 if os.path.exists(cache_dir):
 import shutil
 shutil.rmtree(cache_dir)
@@ -352,7 +352,10 @@ class Audio:
 
 :param text: text to play
 """
-
+if text:
+self.window.update_status(trans("status.audio.start"))
+else:
+self.window.update_status("")
 QApplication.processEvents() # process events to update UI
 
 def on_play(self, event: str):
pygpt_net/controller/ctx/extra.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.07
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 from PySide6.QtWidgets import QApplication
@@ -220,12 +220,7 @@ class Extra:
 self.window.controller.audio.read_text(item.output)
 self.audio_play_id = item_id
 elif self.audio_play_id == item_id:
-
-self.window.controller.audio.read_text(item.output)
-else:
-self.window.controller.audio.stop_output()
-self.window.controller.audio.on_stop()
-self.audio_play_id = None
+self.window.controller.audio.read_text(item.output)
 
 def join_item(
 self,
pygpt_net/core/audio/audio.py
CHANGED
@@ -6,9 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.07
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
+import hashlib
+import os
 import re
 from typing import Union, Optional, Tuple, List
 
@@ -21,6 +23,9 @@ from .whisper import Whisper
 
 
 class Audio:
+
+CACHE_FORMAT = "mp3" # default cache format
+
 def __init__(self, window=None):
 """
 Audio input/output core
@@ -173,3 +178,80 @@ class Audio:
 :return: Error
 """
 return self.last_error
+
+def prepare_cache_path(self, content: str) -> Tuple[Union[str, None], bool]:
+"""
+Prepare unique cache file name for given content
+
+:param content: text content to generate cache file name
+:return: cache file path or None if content is empty
+"""
+exists = False
+if not content:
+return None, exists
+sha1sum = hashlib.sha1(content.encode('utf-8')).hexdigest()
+filename = f"{sha1sum}." + self.CACHE_FORMAT
+tmp_dir = self.get_cache_dir()
+path = os.path.join(tmp_dir, filename)
+if os.path.exists(path):
+exists = True
+return str(path), exists
+
+def get_cache_dir(self) -> str:
+"""
+Get cache directory for audio files
+
+:return: audio cache directory path
+"""
+dir = self.window.core.config.get_user_dir("tmp")
+if not os.path.exists(dir):
+os.makedirs(dir, exist_ok=True)
+tmp_dir = os.path.join(dir, "audio_cache")
+if not os.path.exists(tmp_dir):
+os.makedirs(tmp_dir, exist_ok=True)
+return tmp_dir
+
+def delete_old_cache(self, max_files: int = 10):
+"""
+Delete old cache files, keeping only the most recent ones.
+
+:param max_files: Maximum number of cache files to keep.
+"""
+tmp_dir = self.get_cache_dir()
+files = [os.path.join(tmp_dir, f) for f in os.listdir(tmp_dir) if f.endswith('.' + self.CACHE_FORMAT)]
+files.sort(key=os.path.getmtime, reverse=True)
+for file in files[max_files:]:
+try:
+os.remove(file)
+except Exception as e:
+print(f"Error deleting cache file {file}: {e}")
+
+def mp3_to_wav(
+self,
+src_file: str,
+dst_file: Optional[str] = None
+) -> Union[str, None]:
+"""
+Convert MP3 file to WAV format
+
+:param src_file: Path to the source MP3 file
+:param dst_file: Optional path for the destination WAV file.
+:return: Path to the converted WAV file or None if conversion fails.
+"""
+from pydub import AudioSegment
+try:
+mp3_audio = AudioSegment.from_mp3(src_file)
+except Exception as e:
+print(f"Error loading mp3 file: {e}")
+print("Please install ffmpeg to handle mp3 files: https://ffmpeg.org/")
+return
+if dst_file is None:
+dir = os.path.dirname(src_file)
+filename = os.path.splitext(os.path.basename(src_file))[0] + ".wav"
+dst_file = os.path.join(dir, filename)
+try:
+mp3_audio.export(dst_file, format="wav")
+return str(dst_file)
+except Exception as e:
+print(f"Error exporting wav file: {e}")
+return
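A rough usage sketch of how the new helpers above are meant to compose (mirroring the audio_output plugin change further below). This is a hedged illustration, not code from the package: `audio` is assumed to be the core Audio object from the hunk above, and `synthesize`/`play` are placeholders.

```python
def speak_with_cache(audio, text: str, synthesize, play, max_files: int = 1000) -> str:
    """Reuse a cached speech file for `text` if one exists, otherwise synthesize and cache it.

    `audio` is assumed to be the core Audio object shown above; `synthesize(text, path)`
    and `play(path)` stand in for a TTS provider call and an audio player.
    """
    audio.delete_old_cache(max_files)              # prune files beyond the configured limit
    path, cached = audio.prepare_cache_path(text)  # SHA-1-keyed cache path for this text
    if not cached:
        synthesize(text, path)                     # write fresh speech audio into the cache
    play(path)
    return path
```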
pygpt_net/core/audio/backend/native.py
CHANGED
@@ -6,18 +6,21 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.07
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
+import os
 import time
 from typing import List, Tuple
 
 from PySide6.QtCore import QTimer, QObject
+from pydub import AudioSegment
 
 
 class NativeBackend(QObject):
 
 MIN_FRAMES = 25 # minimum frames to start transcription
+AUTO_CONVERT_TO_WAV = False # automatically convert to WAV format
 
 def __init__(self, window=None):
 """
@@ -42,7 +45,10 @@ class NativeBackend(QObject):
 self.stop_callback = None
 self.player = None
 self.playback_timer = None
+self.volume_timer = None
 self.audio_output = None
+self.envelope = []
+self.chunk_ms = 10
 
 # Get configuration values (use defaults if unavailable)
 if self.window is not None and hasattr(self.window, "core"):
@@ -457,8 +463,6 @@ class NativeBackend(QObject):
 :param signals: Signals to emit on playback
 :return: True if started
 """
-import os
-from pydub import AudioSegment
 from PySide6.QtMultimedia import QMediaPlayer, QAudioOutput, QMediaDevices
 from PySide6.QtCore import QUrl, QTimer
 if signals is not None:
@@ -476,42 +480,57 @@ class NativeBackend(QObject):
 selected_device = devices[num_device] if num_device < len(devices) else devices[0]
 self.audio_output.setDevice(selected_device)
 
-self.
-
-
-
-
-
-
-
-return
-
-base_dir = self.window.core.config.get_user_path()
-base_name = os.path.splitext(os.path.basename(audio_file))[0]
-wav_file = os.path.join(base_dir, "_" + base_name + ".wav")
-try:
-mp3_audio.export(wav_file, format="wav")
-except Exception as e:
-print(f"Error exporting wav file: {e}")
-return
-
-self.player.setSource(QUrl.fromLocalFile(wav_file))
-else:
-self.player.setSource(QUrl.fromLocalFile(audio_file))
-
-self.player.play()
-self.playback_timer = QTimer()
-self.playback_timer.setInterval(100)
+if self.AUTO_CONVERT_TO_WAV:
+if audio_file.lower().endswith('.mp3'):
+tmp_dir = self.window.core.audio.get_cache_dir()
+base_name = os.path.splitext(os.path.basename(audio_file))[0]
+dst_file = os.path.join(tmp_dir, "_" + base_name + ".wav")
+wav_file = self.window.core.audio.mp3_to_wav(audio_file, dst_file)
+if wav_file:
+audio_file = wav_file
 
 def check_stop():
 if stopped():
 self.player.stop()
-self.
+self.stop_timers()
+signals.volume_changed.emit(0)
+else:
+if self.player:
+if self.player.playbackState() == QMediaPlayer.StoppedState:
+self.player.stop()
+self.stop_timers()
+signals.volume_changed.emit(0)
 
+self.envelope = self.calculate_envelope(audio_file, self.chunk_ms)
+self.player = QMediaPlayer()
+self.player.setAudioOutput(self.audio_output)
+self.player.setSource(QUrl.fromLocalFile(audio_file))
+self.player.play()
+
+self.playback_timer = QTimer()
+self.playback_timer.setInterval(100)
 self.playback_timer.timeout.connect(check_stop)
+self.volume_timer = QTimer(self)
+self.volume_timer.setInterval(10) # every 100 ms
+self.volume_timer.timeout.connect(
+lambda: self.update_volume(signals)
+)
+
 self.playback_timer.start()
+self.volume_timer.start()
 signals.volume_changed.emit(0)
 
+def stop_timers(self):
+"""
+Stop playback timers.
+"""
+if self.playback_timer is not None:
+self.playback_timer.stop()
+self.playback_timer = None
+if self.volume_timer is not None:
+self.volume_timer.stop()
+self.volume_timer = None
+
 def play(
 self,
 audio_file: str,
@@ -543,6 +562,49 @@ class NativeBackend(QObject):
 self.playback_timer.stop()
 return False
 
+def calculate_envelope(
+self,
+audio_file: str,
+chunk_ms: int = 100
+) -> list:
+"""
+Calculate the volume envelope of an audio file.
+
+:param audio_file: Path to the audio file
+:param chunk_ms: Size of each chunk in milliseconds
+"""
+import numpy as np
+audio = AudioSegment.from_file(audio_file)
+max_amplitude = 32767
+envelope = []
+
+for ms in range(0, len(audio), chunk_ms):
+chunk = audio[ms:ms + chunk_ms]
+rms = chunk.rms
+if rms > 0:
+db = 20 * np.log10(rms / max_amplitude)
+else:
+db = -60
+db = max(-60, min(0, db))
+volume = ((db + 60) / 60) * 100
+envelope.append(volume)
+
+return envelope
+
+def update_volume(self, signals=None):
+"""
+Update the volume based on the current position in the audio file.
+
+:param signals: Signals object to emit volume changed event.
+"""
+pos = self.player.position()
+index = int(pos / self.chunk_ms)
+if index < len(self.envelope):
+volume = self.envelope[index]
+else:
+volume = 0
+signals.volume_changed.emit(volume)
+
 def get_input_devices(self) -> List[Tuple[int, str]]:
 """
 Get input devices
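For reference, the envelope math introduced above maps each chunk's RMS to dBFS and then linearly rescales the -60..0 dB window to 0-100%. A small standalone restatement (the helper name `rms_to_percent` is illustrative only):

```python
import math

def rms_to_percent(rms: int, max_amplitude: int = 32767) -> float:
    """Map an RMS sample value to the 0-100% scale used by the envelope above."""
    db = 20 * math.log10(rms / max_amplitude) if rms > 0 else -60.0  # dBFS, floored at -60
    db = max(-60.0, min(0.0, db))          # clamp to the -60..0 dB window
    return (db + 60.0) / 60.0 * 100.0      # linear remap of -60..0 dB to 0..100%

# e.g. rms_to_percent(3276) ~= 66.7 (about -20 dBFS)
```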
pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
 "__meta__": {
-"version": "2.5.
-"app.version": "2.5.
-"updated_at": "2025-08-
+"version": "2.5.92",
+"app.version": "2.5.92",
+"updated_at": "2025-08-08T00:00:00"
 },
 "access.audio.event.speech": false,
 "access.audio.event.speech.disabled": [],
@@ -94,6 +94,8 @@
 "attachments_auto_index": true,
 "attachments_send_clear": true,
 "attachments_capture_clear": true,
+"audio.cache.enabled": true,
+"audio.cache.max_files": 1000,
 "audio.input.backend": "native",
 "audio.input.channels": 1,
 "audio.input.continuous": false,
pygpt_net/data/config/models.json
CHANGED
@@ -1,8 +1,8 @@
 {
 "__meta__": {
-"version": "2.5.
-"app.version": "2.5.
-"updated_at": "2025-08-
+"version": "2.5.92",
+"app.version": "2.5.92",
+"updated_at": "2025-08-08T23:07:35"
 },
 "items": {
 "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
@@ -2028,6 +2028,171 @@
 "provider": "openai",
 "tool_calls": true
 },
+"gpt-5": {
+"id": "gpt-5",
+"name": "gpt-5",
+"mode": [
+"chat",
+"assistant",
+"llama_index",
+"vision",
+"agent",
+"agent_llama",
+"expert",
+"agent_openai"
+],
+"llama_index": {
+"args": [
+{
+"name": "model",
+"value": "gpt-5",
+"type": "str"
+}
+],
+"env": [
+{
+"name": "OPENAI_API_KEY",
+"value": "{api_key}"
+},
+{
+"name": "OPENAI_API_BASE",
+"value": "{api_endpoint}"
+},
+{
+"name": "AZURE_OPENAI_ENDPOINT",
+"value": "{api_azure_endpoint}"
+},
+{
+"name": "OPENAI_API_VERSION",
+"value": "{api_azure_version}"
+}
+]
+},
+"ctx": 400000,
+"tokens": 128000,
+"default": true,
+"input": [
+"text",
+"image"
+],
+"output": [
+"text"
+],
+"extra": {},
+"imported": false,
+"provider": "openai",
+"tool_calls": true
+},
+"gpt-5-mini": {
+"id": "gpt-5-mini",
+"name": "gpt-5-mini",
+"mode": [
+"chat",
+"assistant",
+"llama_index",
+"vision",
+"agent",
+"agent_llama",
+"expert",
+"agent_openai"
+],
+"llama_index": {
+"args": [
+{
+"name": "model",
+"value": "gpt-5-mini",
+"type": "str"
+}
+],
+"env": [
+{
+"name": "OPENAI_API_KEY",
+"value": "{api_key}"
+},
+{
+"name": "OPENAI_API_BASE",
+"value": "{api_endpoint}"
+},
+{
+"name": "AZURE_OPENAI_ENDPOINT",
+"value": "{api_azure_endpoint}"
+},
+{
+"name": "OPENAI_API_VERSION",
+"value": "{api_azure_version}"
+}
+]
+},
+"ctx": 400000,
+"tokens": 128000,
+"default": true,
+"input": [
+"text",
+"image"
+],
+"output": [
+"text"
+],
+"extra": {},
+"imported": false,
+"provider": "openai",
+"tool_calls": true
+},
+"gpt-5-nano": {
+"id": "gpt-5-nano",
+"name": "gpt-5-nano",
+"mode": [
+"chat",
+"assistant",
+"llama_index",
+"vision",
+"agent",
+"agent_llama",
+"expert",
+"agent_openai"
+],
+"llama_index": {
+"args": [
+{
+"name": "model",
+"value": "gpt-5-nano",
+"type": "str"
+}
+],
+"env": [
+{
+"name": "OPENAI_API_KEY",
+"value": "{api_key}"
+},
+{
+"name": "OPENAI_API_BASE",
+"value": "{api_endpoint}"
+},
+{
+"name": "AZURE_OPENAI_ENDPOINT",
+"value": "{api_azure_endpoint}"
+},
+{
+"name": "OPENAI_API_VERSION",
+"value": "{api_azure_version}"
+}
+]
+},
+"ctx": 400000,
+"tokens": 128000,
+"default": true,
+"input": [
+"text",
+"image"
+],
+"output": [
+"text"
+],
+"extra": {},
+"imported": false,
+"provider": "openai",
+"tool_calls": true
+},
 "gpt-image-1": {
 "id": "gpt-image-1",
 "name": "gpt-image-1",
pygpt_net/data/config/settings.json
CHANGED
@@ -1373,7 +1373,7 @@
 "step": 1,
 "advanced": false,
 "tab": "device"
-},
+},
 "audio.input.stop_interval": {
 "section": "audio",
 "type": "int",
@@ -1414,6 +1414,28 @@
 "advanced": false,
 "tab": "options"
 },
+"audio.cache.enabled": {
+"section": "audio",
+"type": "bool",
+"slider": true,
+"label": "settings.audio.cache.enabled",
+"description": "settings.audio.cache.enabled.desc",
+"value": true,
+"advanced": false,
+"tab": "cache"
+},
+"audio.cache.max_files": {
+"section": "audio",
+"type": "int",
+"slider": false,
+"label": "settings.audio.cache.max_files",
+"description": "settings.audio.cache.max_files.desc",
+"min": 0,
+"multiplier": 1,
+"value": 1000,
+"advanced": false,
+"tab": "cache"
+},
 "remote_tools.web_search": {
 "section": "remote_tools",
 "type": "bool",
pygpt_net/data/locale/locale.de.ini
CHANGED
@@ -1025,6 +1025,10 @@ settings.api_use_responses_llama = Verwenden Sie die Responses API im Chat mit D
 settings.api_use_responses_llama.desc = Verwende Responses API anstelle von ChatCompletions API im Chat-mit-Dateien-Modus (LlamaIndex). Nur OpenAI-Modelle.
 settings.app.env = Anwendungsumgebung (os.environ)
 settings.app.env.desc = Zusätzliche Umgebungsvariablen, die beim Start der Anwendung gesetzt werden sollen
+settings.audio.cache.enabled = Cache aktivieren
+settings.audio.cache.enabled.desc = Aktivieren Sie das Audio-Caching für die Spracherzeugung.
+settings.audio.cache.max_files = Maximale Anzahl zu speichernder Dateien
+settings.audio.cache.max_files.desc = Maximale Anzahl gecachter Audiodateien, die auf der Festplatte gespeichert werden
 settings.audio.input.backend = Backend für Audioeingabe
 settings.audio.input.backend.desc = Wählen Sie das Backend für die Audioeingabe.
 settings.audio.input.channels = Kanäle
@@ -1239,6 +1243,7 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = Audio
+settings.section.audio.cache = Cache
 settings.section.audio.device = Geräte
 settings.section.audio.options = Optionen
 settings.section.ctx = Kontext
pygpt_net/data/locale/locale.en.ini
CHANGED
@@ -1031,6 +1031,10 @@ settings.api_use_responses_llama = Use Responses API in Chat with Files mode (Ll
 settings.api_use_responses_llama.desc = Use Responses API instead of ChatCompletions API in Chat with Files mode (LlamaIndex). OpenAI models only.
 settings.app.env = Application environment (os.environ)
 settings.app.env.desc = Additional environment vars to set on application start
+settings.audio.cache.enabled = Enable Cache
+settings.audio.cache.enabled.desc = Enable audio caching for speech synthesis generation.
+settings.audio.cache.max_files = Max files to store
+settings.audio.cache.max_files.desc = Max number of cached audio files stored to disk
 settings.audio.input.backend = Audio Input Backend
 settings.audio.input.backend.desc = Select the audio input backend.
 settings.audio.input.channels = Channels
@@ -1249,6 +1253,7 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = Audio
+settings.section.audio.cache = Cache
 settings.section.audio.device = Devices
 settings.section.audio.options = Options
 settings.section.ctx = Context
pygpt_net/data/locale/locale.es.ini
CHANGED
@@ -1026,6 +1026,10 @@ settings.api_use_responses_llama = Usar Responses API en modo Chat con Archivos
 settings.api_use_responses_llama.desc = Usar Responses API en lugar de ChatCompletions API en modo Chat con Archivos (LlamaIndex). Solo modelos OpenAI.
 settings.app.env = Entorno de la aplicación (os.environ)
 settings.app.env.desc = Variables de entorno adicionales para configurar en el inicio de la aplicación
+settings.audio.cache.enabled = Habilitar caché
+settings.audio.cache.enabled.desc = Habilitar caché de audio para la generación de síntesis de voz.
+settings.audio.cache.max_files = Número máximo de archivos a almacenar
+settings.audio.cache.max_files.desc = Número máximo de archivos de audio en caché almacenados en el disco
 settings.audio.input.backend = Backend para la entrada de audio
 settings.audio.input.backend.desc = Selecciona el backend para la entrada de audio.
 settings.audio.input.channels = Canaux
@@ -1240,6 +1244,7 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = Audio
+settings.section.audio.cache = Caché
 settings.section.audio.device = Dispositivos
 settings.section.audio.options = Opciones
 settings.section.ctx = Contexto
pygpt_net/data/locale/locale.fr.ini
CHANGED
@@ -1025,6 +1025,10 @@ settings.api_use_responses_llama = Utiliser Responses API en mode Chat avec des
 settings.api_use_responses_llama.desc = Utiliser Responses API au lieu de ChatCompletions API en mode Chat avec des Fichiers (LlamaIndex). Modèles OpenAI uniquement.
 settings.app.env = Environnement de l'application (os.environ)
 settings.app.env.desc = Variables d'environnement supplémentaires à définir au démarrage de l'application
+settings.audio.cache.enabled = Activer le cache
+settings.audio.cache.enabled.desc = Activer le cache audio pour la génération de synthèse vocale.
+settings.audio.cache.max_files = Nombre maximal de fichiers à stocker
+settings.audio.cache.max_files.desc = Nombre maximal de fichiers audio mis en cache et enregistrés sur le disque
 settings.audio.input.backend = Backend pour l'entrée audio
 settings.audio.input.backend.desc = Sélectionnez le backend pour l'entrée audio.
 settings.audio.input.channels = Canaux
@@ -1239,6 +1243,7 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = Audio
+settings.section.audio.cache = Cache
 settings.section.audio.device = Appareils
 settings.section.audio.options = Options
 settings.section.ctx = Contexte
pygpt_net/data/locale/locale.it.ini
CHANGED
@@ -1025,6 +1025,10 @@ settings.api_use_responses_llama = Usa il Responses API in modalità Chat con Fi
 settings.api_use_responses_llama.desc = Usa il Responses API invece del ChatCompletions API in modalità Chat con File (LlamaIndex). Solo modelli OpenAI.
 settings.app.env = Ambiente dell'applicazione (os.environ)
 settings.app.env.desc = Variabili d'ambiente aggiuntive da impostare all'avvio dell'applicazione
+settings.audio.cache.enabled = Abilita cache
+settings.audio.cache.enabled.desc = Abilita la memorizzazione audio per la generazione della sintesi vocale.
+settings.audio.cache.max_files = Numero massimo di file da memorizzare
+settings.audio.cache.max_files.desc = Numero massimo di file audio in cache memorizzati su disco
 settings.audio.input.backend = Backend per l'ingresso audio
 settings.audio.input.backend.desc = Seleziona il backend per l'ingresso audio.
 settings.audio.input.channels = Canali
@@ -1239,6 +1243,7 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = Audio
+settings.section.audio.cache = Cache
 settings.section.audio.device = Dispositivi
 settings.section.audio.options = Opzioni
 settings.section.ctx = Contesto
pygpt_net/data/locale/locale.pl.ini
CHANGED
@@ -1027,6 +1027,10 @@ settings.api_use_responses_llama = Używaj Responses API w trybie Chat z Plikami
 settings.api_use_responses_llama.desc = Używaj Responses API zamiast ChatCompletions API w trybie Chat z Plikami (LlamaIndex). Tylko modele OpenAI.
 settings.app.env = Środowisko aplikacji (os.environ)
 settings.app.env.desc = Dodatkowe zmienne środowiskowe do ustawienia przy starcie aplikacji
+settings.audio.cache.enabled = Włącz pamięć podręczną
+settings.audio.cache.enabled.desc = Włącz buforowanie audio dla generowania syntezy mowy.
+settings.audio.cache.max_files = Maksymalna liczba plików
+settings.audio.cache.max_files.desc = Maksymalna liczba plików audio w pamięci podręcznej zapisywanych na dysku
 settings.audio.input.backend = Backend dla wejścia audio
 settings.audio.input.backend.desc = Wybierz backend dla wejścia audio.
 settings.audio.input.channels = Kanały
@@ -1241,7 +1245,8 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = Audio
-settings.section.audio.
+settings.section.audio.cache = Pamięć podręczna
+settings.section.audio.device = Urządzenia
 settings.section.audio.options = Opcje
 settings.section.ctx = Kontekst
 settings.section.developer = Deweloper
pygpt_net/data/locale/locale.uk.ini
CHANGED
@@ -1025,6 +1025,10 @@ settings.api_use_responses_llama = Використовувати Responses API
 settings.api_use_responses_llama.desc = Використовувати Responses API замість ChatCompletions API в режимі Чат з Файлами (LlamaIndex). Тільки моделі OpenAI.
 settings.app.env = Оточення додатка (os.environ)
 settings.app.env.desc = Додаткові змінні середовища для встановлення при запуску додатка
+settings.audio.cache.enabled = Увімкнути кеш
+settings.audio.cache.enabled.desc = Увімкнути кешування аудіо для генерації синтезу мови.
+settings.audio.cache.max_files = Максимальна кількість файлів для збереження
+settings.audio.cache.max_files.desc = Максимальна кількість кешованих аудіофайлів, збережених на диску
 settings.audio.input.backend = Бекенд для аудіовходу
 settings.audio.input.backend.desc = Виберіть бекенд для аудіовходу.
 settings.audio.input.channels = Канали
@@ -1239,6 +1243,7 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = Аудіо
+settings.section.audio.cache = Кеш
 settings.section.audio.device = Пристрої
 settings.section.audio.options = Параметри
 settings.section.ctx = Контекст
pygpt_net/data/locale/locale.zh.ini
CHANGED
@@ -1025,6 +1025,10 @@ settings.api_use_responses_llama = 在Chat with Files模式(LlamaIndex)下
 settings.api_use_responses_llama.desc = 在Chat with Files模式(LlamaIndex)下使用Responses API而不是ChatCompletions API。仅限OpenAI模型。
 settings.app.env = 应用程序环境 (os.environ)
 settings.app.env.desc = 在应用程序启动时要设置的额外环境变量
+settings.audio.cache.enabled = 启用缓存
+settings.audio.cache.enabled.desc = 启用音频缓存以进行语音合成生成。
+settings.audio.cache.max_files = 要存储的最大文件数
+settings.audio.cache.max_files.desc = 缓存到磁盘的最大音频文件数
 settings.audio.input.backend = 音频输入的后端
 settings.audio.input.backend.desc = 选择音频输入的后端。
 settings.audio.input.channels = 声道
@@ -1239,6 +1243,7 @@ settings.section.api_keys.openai = OpenAI
 settings.section.api_keys.perplexity = Perplexity
 settings.section.api_keys.xai = xAI
 settings.section.audio = 音频
+settings.section.audio.cache = 缓存
 settings.section.audio.device = 设备
 settings.section.audio.options = 选项
 settings.section.ctx = 上下文
pygpt_net/plugin/audio_output/plugin.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.07
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 from typing import Any
@@ -176,6 +176,24 @@ class Plugin(BasePlugin):
 if not ctx.audio_read_allowed():
 return # abort if audio read is not allowed (commands, results, etc.)
 
+# cache ctx
+cache_enabled = self.window.core.config.get("audio.cache.enabled", False)
+max_files = int(self.window.core.config.get("audio.cache.max_files", 10))
+if cache_enabled:
+# delete old if max
+if max_files > 0:
+self.window.core.audio.delete_old_cache(max_files)
+if not cache_file:
+# gen cache file path if exists
+tmp_cache_file, is_cached = self.window.core.audio.prepare_cache_path(text)
+if is_cached:
+event = Event(Event.AUDIO_PLAYBACK, ctx=ctx)
+event.data = {"audio_file": tmp_cache_file}
+return self.on_playback(ctx, event) # playback cached audio file
+else:
+if tmp_cache_file:
+cache_file = tmp_cache_file # store cache file
+
 try:
 if text is not None and len(text) > 0:
 self.stop_audio()
@@ -229,6 +247,8 @@ class Plugin(BasePlugin):
 worker.signals.stop.connect(self.handle_stop)
 worker.signals.volume_changed.connect(self.handle_volume)
 
+self.window.controller.audio.on_begin("")
+
 backend = self.window.core.config.get("audio.output.backend", "native")
 if backend == "native":
 worker.play()
pygpt_net/provider/audio_output/eleven_labs.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 import os
@@ -107,7 +107,7 @@ class ElevenLabsTextToSpeech(BaseProvider):
 for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
 if chunk:
 f.write(chunk)
-return path
+return str(path)
 
 def is_configured(self) -> bool:
 """
pygpt_net/provider/audio_output/google_tts.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 import json
 import os
@@ -108,7 +108,7 @@ class GoogleTextToSpeech(BaseProvider):
 content = json.loads(r.content)
 with open(path, "wb") as file:
 file.write(content.get("audioContent"))
-return path
+return str(path)
 
 def is_configured(self) -> bool:
 """
pygpt_net/provider/audio_output/ms_azure_tts.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 import os
@@ -111,7 +111,7 @@ class MSAzureTextToSpeech(BaseProvider):
 if response.status_code == 200:
 with open(path, "wb") as file:
 file.write(response.content)
-return path
+return str(path)
 
 def is_configured(self) -> bool:
 """
pygpt_net/provider/audio_output/openai_tts.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 import os
@@ -79,7 +79,7 @@ class OpenAITextToSpeech(BaseProvider):
 input=text,
 )
 response.stream_to_file(path)
-return path
+return str(path)
 
 def is_configured(self) -> bool:
 """
pygpt_net/provider/core/config/json_file.py
CHANGED
@@ -55,13 +55,21 @@ class JsonFileProvider(BaseProvider):
 shutil.copyfile(src, dst)
 print("RECOVERY: Restored config file from base config: {}".format(src))
 
-# data
+# data tmp
 tmp_dir = os.path.join(self.window.core.config.get_user_dir('data'), 'tmp')
 if not os.path.exists(tmp_dir):
 try:
 os.makedirs(tmp_dir, exist_ok=True)
 except Exception as e:
-print("WARNING: Cannot create ``{}`` temp directory: {}".format(tmp_dir, e))
+print("WARNING: Cannot create ``{}`` data temp directory: {}".format(tmp_dir, e))
+
+# base tmp
+tmp_dir = os.path.join(self.window.core.config.get_user_path(), 'tmp')
+if not os.path.exists(tmp_dir):
+try:
+os.makedirs(tmp_dir, exist_ok=True)
+except Exception as e:
+print("WARNING: Cannot create ``{}`` base temp directory: {}".format(tmp_dir, e))
 
 def get_version(self) -> Optional[str]:
 """
pygpt_net/provider/core/config/patch.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.07
+# Updated Date: 2025.08.07 22:00:00 #
 # ================================================== #
 
 import copy
@@ -2184,6 +2184,20 @@ class Patch:
 self.window.core.updater.patch_css('web-blocks.light.css', True) # force replace file
 updated = True
 
+# < 2.5.91
+if old < parse_version("2.5.91"):
+print("Migrating config from < 2.5.91...")
+if "audio.cache.enabled" not in data:
+data["audio.cache.enabled"] = True
+updated = True
+
+# < 2.5.92
+if old < parse_version("2.5.92"):
+print("Migrating config from < 2.5.92...")
+if "audio.cache.max_files" not in data:
+data["audio.cache.max_files"] = 1000
+updated = True
+
 # update file
 migrated = False
 if updated:
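The migration pattern above (and in the model patcher that follows) is a version gate around idempotent key insertion. A minimal standalone sketch, assuming `packaging` is available and the stored version string is well-formed; this is an illustration, not the project's actual patcher:

```python
from packaging.version import parse as parse_version

def migrate_config(data: dict, old_version: str) -> bool:
    """Apply version-gated defaults in place; return True if anything changed."""
    old = parse_version(old_version)
    updated = False
    if old < parse_version("2.5.91") and "audio.cache.enabled" not in data:
        data["audio.cache.enabled"] = True      # default introduced in 2.5.91
        updated = True
    if old < parse_version("2.5.92") and "audio.cache.max_files" not in data:
        data["audio.cache.max_files"] = 1000    # default introduced in 2.5.92
        updated = True
    return updated
```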
pygpt_net/provider/core/model/patch.py
CHANGED
@@ -710,6 +710,17 @@ class Patch:
 model.mode.remove("agent_openai")
 updated = True
 
+# < 2.5.91 <--- update names to models IDs
+if old < parse_version("2.5.91"):
+print("Migrating models from < 2.5.91...")
+if "gpt-5" not in data:
+data["gpt-5"] = base_data["gpt-5"]
+if "gpt-5-mini" not in data:
+data["gpt-5-mini"] = base_data["gpt-5-mini"]
+if "gpt-5-nano" not in data:
+data["gpt-5-nano"] = base_data["gpt-5-nano"]
+updated = True
+
 # update file
 if updated:
 data = dict(sorted(data.items()))
{pygpt_net-2.5.90.dist-info → pygpt_net-2.5.92.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.3
 Name: pygpt-net
-Version: 2.5.
-Summary: Desktop AI Assistant powered by: OpenAI o1, o3, GPT-
+Version: 2.5.92
+Summary: Desktop AI Assistant powered by: OpenAI GPT-5, o1, o3, GPT-4, Gemini, Claude, Grok, DeepSeek, and other models supported by Llama Index, and Ollama. Chatbot, agents, completion, image generation, vision analysis, speech-to-text, plugins, internet access, file handling, command execution and more.
 License: MIT
-Keywords: py_gpt,py-gpt,pygpt,desktop,app,o1,o3,gpt,gpt4,gpt-4o,gpt-4v,gpt3.5,gpt-4,gpt-4-vision,gpt-3.5,llama3,mistral,gemini,grok,deepseek,bielik,claude,tts,whisper,vision,chatgpt,dall-e,chat,chatbot,assistant,text completion,image generation,ai,api,openai,api key,langchain,llama-index,ollama,presets,ui,qt,pyside
+Keywords: py_gpt,py-gpt,pygpt,desktop,app,o1,o3,gpt-5,gpt,gpt4,gpt-4o,gpt-4v,gpt3.5,gpt-4,gpt-4-vision,gpt-3.5,llama3,mistral,gemini,grok,deepseek,bielik,claude,tts,whisper,vision,chatgpt,dall-e,chat,chatbot,assistant,text completion,image generation,ai,api,openai,api key,langchain,llama-index,ollama,presets,ui,qt,pyside
 Author: Marcin Szczyglinski
 Author-email: info@pygpt.net
 Requires-Python: >=3.10,<3.14
@@ -104,7 +104,7 @@ Description-Content-Type: text/markdown
 
 [](https://snapcraft.io/pygpt)
 
-Release: **2.5.
+Release: **2.5.92** | build: **2025-08-08** | Python: **>=3.10, <3.14**
 
 > Official website: https://pygpt.net | Documentation: https://pygpt.readthedocs.io
 >
@@ -116,7 +116,7 @@ Release: **2.5.90** | build: **2025-08-07** | Python: **>=3.10, <3.14**
 
 ## Overview
 
-**PyGPT** is **all-in-one** Desktop AI Assistant that provides direct interaction with OpenAI language models, including `o1`, `o3`, `gpt-4o`, `gpt-4`, `gpt-4 Vision`, and `gpt-3.5`, through the `OpenAI API`. By utilizing `LlamaIndex`, the application also supports alternative LLMs, like those available on `HuggingFace`, locally available models via `Ollama` (like `Llama 3`,`Mistral`, `DeepSeek V3/R1` or `Bielik`), `Google Gemini`, `Anthropic Claude`, `Perplexity Sonar`, and `xAI Grok`.
+**PyGPT** is **all-in-one** Desktop AI Assistant that provides direct interaction with OpenAI language models, including `gpt-5`, `o1`, `o3`, `gpt-4o`, `gpt-4`, `gpt-4 Vision`, and `gpt-3.5`, through the `OpenAI API`. By utilizing `LlamaIndex`, the application also supports alternative LLMs, like those available on `HuggingFace`, locally available models via `Ollama` (like `Llama 3`,`Mistral`, `DeepSeek V3/R1` or `Bielik`), `Google Gemini`, `Anthropic Claude`, `Perplexity Sonar`, and `xAI Grok`.
 
 This assistant offers multiple modes of operation such as chat, assistants, completions, and image-related tasks using `DALL-E 3` for generation and `gpt-4 Vision` for image analysis. **PyGPT** has filesystem capabilities for file I/O, can generate and run Python code, execute system commands, execute custom commands and manage file transfers. It also allows models to perform web searches with the `Google` and `Microsoft Bing`.
 
@@ -145,7 +145,7 @@ You can download compiled 64-bit versions for Windows and Linux here: https://py
 - Desktop AI Assistant for `Linux`, `Windows` and `Mac`, written in Python.
 - Works similarly to `ChatGPT`, but locally (on a desktop computer).
 - 12 modes of operation: Chat, Chat with Files, Chat with Audio, Research (Perplexity), Completion, Image generation, Vision, Assistants, Experts, Computer use, Agents and Autonomous Mode.
-- Supports multiple models: `o1`, `o3`, `GPT-4o`, `GPT-4`, `GPT-3.5`, and any model accessible through `LlamaIndex` and `Ollama` such as `Llama 3`, `Mistral`, `Google Gemini`, `Anthropic Claude`, `xAI Grok`, `DeepSeek V3/R1`, `Perplexity Sonar`, `Bielik`, etc.
+- Supports multiple models: `gpt-5`, `o1`, `o3`, `GPT-4o`, `GPT-4`, `GPT-3.5`, and any model accessible through `LlamaIndex` and `Ollama` such as `Llama 3`, `Mistral`, `Google Gemini`, `Anthropic Claude`, `xAI Grok`, `DeepSeek V3/R1`, `Perplexity Sonar`, `Bielik`, etc.
 - Chat with your own Files: integrated `LlamaIndex` support: chat with data such as: `txt`, `pdf`, `csv`, `html`, `md`, `docx`, `json`, `epub`, `xlsx`, `xml`, webpages, `Google`, `GitHub`, video/audio, images and other data types, or use conversation history as additional context provided to the model.
 - Built-in vector databases support and automated files and data embedding.
 - Included support features for individuals with disabilities: customizable keyboard shortcuts, voice control, and translation of on-screen actions into audio via speech synthesis.
@@ -1181,6 +1181,9 @@ PyGPT has built-in support for models (as of 2025-07-26):
 - `gpt-4o-2024-11-20` (OpenAI)
 - `gpt-4o-audio-preview` (OpenAI)
 - `gpt-4o-mini` (OpenAI)
+- `gpt-5` (OpenAI)
+- `gpt-5-mini` (OpenAI)
+- `gpt-5-nano` (OpenAI)
 - `gpt-image-1` (OpenAI)
 - `grok-2-vision` (xAI)
 - `grok-3` (xAI)
@@ -3292,6 +3295,10 @@ Enable/disable remote tools, like Web Search or Image generation to use in OpenA
 
 - `Sampling Rate`: Sampling rate, default: 44100
 
+- `Use cache`: Use cache for generating audio files.
+
+- `Max files to store`: Max files to store on disk for audio cache.
+
 **Indexes / LlamaIndex**
 
 **General**
@@ -4351,6 +4358,15 @@ may consume additional tokens that are not displayed in the main window.
 
 ## Recent changes:
 
+**2.5.92 (2025-08-08)**
+
+- Added max files to store config option in Audio -> Cache.
+
+**2.5.91 (2025-08-08)**
+
+- Added GPT-5.
+- Added audio cache - #118.
+
 **2.5.90 (2025-08-07)**
 
 - Fix: Initialize context summary if a conversation starts with a tool call.
@@ -1,6 +1,6 @@
|
|
|
1
|
-
pygpt_net/CHANGELOG.txt,sha256=
|
|
1
|
+
pygpt_net/CHANGELOG.txt,sha256=G4RISl9r-SJc1bsRAgeoUyUMokGlp1urYmOyeC_jzIE,96791
|
|
2
2
|
pygpt_net/LICENSE,sha256=dz9sfFgYahvu2NZbx4C1xCsVn9GVer2wXcMkFRBvqzY,1146
|
|
3
|
-
pygpt_net/__init__.py,sha256=
|
|
3
|
+
pygpt_net/__init__.py,sha256=At2kiZ5ezKyU9mHK_GDNkheHFgl3buN3__yfacOt_EY,1373
|
|
4
4
|
pygpt_net/app.py,sha256=86COG6F-sC9dIRURMjBPS4fkD3Zzrp5kwCFQuM4tikc,18971
|
|
5
5
|
pygpt_net/config.py,sha256=FxN39zmTpOY84lgrfhfIDuxKmNPrePhNHdGxvEOKqK8,16881
|
|
6
6
|
pygpt_net/container.py,sha256=4puHeSNIyj3q6gTjPLsQKE1U-4fAhoNANYKiDq-mTqA,4054
|
|
@@ -8,7 +8,7 @@ pygpt_net/controller/__init__.py,sha256=wtlkw4viFVuf2sP0iMSkK0jI1QMa3kcPfvQaqnC-
|
|
|
8
8
|
pygpt_net/controller/access/__init__.py,sha256=_XZxGy5U93JGU49GbIB9E_I26_uRV_Zbz18lcp7u23A,510
|
|
9
9
|
pygpt_net/controller/access/access.py,sha256=Q_p0ner14zKWjPC4Ug-V53IP9QrwJpefUMaed4g4aU4,2526
|
|
10
10
|
pygpt_net/controller/access/control.py,sha256=nMGWzg60jNJMVAHIrism0_APzVMpbLAOcXG6mJuOSJ8,17332
|
|
11
|
-
pygpt_net/controller/access/voice.py,sha256=
|
|
11
|
+
pygpt_net/controller/access/voice.py,sha256=UulM1KgqUQp601rlQoTTq7pZR4dcZOcn9JjKkPYDKPc,15757
|
|
12
12
|
pygpt_net/controller/agent/__init__.py,sha256=GRKHllr8kuzoA2_rRHiQv27znsEcwLCiuNuU4G9xVZw,509
|
|
13
13
|
pygpt_net/controller/agent/agent.py,sha256=bTlAVsJ5QPyQD525RwxV_yDiWyRZfAtTuspAdVN7hbA,1268
|
|
14
14
|
pygpt_net/controller/agent/common.py,sha256=55CHhV-dsWeNe5QvdvNoyhEYVhQNrHt_Lv-VDTuiYRc,3871
|
|
@@ -25,7 +25,7 @@ pygpt_net/controller/assistant/threads.py,sha256=ShyV2pcuJHWMCWfCd0F5LTw3mc6A_4O
|
|
|
25
25
|
pygpt_net/controller/attachment/__init__.py,sha256=-5owOyszPze-YLQuTtFLQnSwEj_ftTxFwAP_3jPNpss,514
|
|
26
26
|
pygpt_net/controller/attachment/attachment.py,sha256=to7QK1PwvvkW6gBUxXFIm0R_wh9HrgW8raZf9dJgVmo,20397
|
|
27
27
|
pygpt_net/controller/audio/__init__.py,sha256=Ci5ClV3DKuMCLtFqQEOr5qun--tlIzKkQlwj9ug_kI0,509
|
|
28
|
-
pygpt_net/controller/audio/audio.py,sha256=
|
|
28
|
+
pygpt_net/controller/audio/audio.py,sha256=rUpibBGPGp1rLW3NJ46xQEqIAC-MyQ5mALSW6YbydQ0,13668
|
|
29
29
|
pygpt_net/controller/calendar/__init__.py,sha256=AyzoNqYgxV35CMEzoi_SCSsQh4ehg_Wu_2nsK3xsbyg,512
|
|
30
30
|
pygpt_net/controller/calendar/calendar.py,sha256=s55RkCFQPFzdDoQ2zp3kohlNdpiWxdSxQtsaROeiigw,4424
|
|
31
31
|
pygpt_net/controller/calendar/note.py,sha256=y8Gkg35-aM1MfQ9P2NsRnmfSJw4Ps__9g6I1RhonR6s,11428
|
|
@@ -63,7 +63,7 @@ pygpt_net/controller/config/placeholder.py,sha256=ImlyuTtY_4__5I63peX3hshP6q1tXn
|
|
|
63
63
|
pygpt_net/controller/ctx/__init__.py,sha256=0wH7ziC75WscBW8cxpeGBwEz5tolo_kCxGPoz2udI_E,507
|
|
64
64
|
pygpt_net/controller/ctx/common.py,sha256=3W3jrsMmMWO9UHIRxfqd4Ob_eelRRCZng3_C3q7FCnQ,6534
|
|
65
65
|
pygpt_net/controller/ctx/ctx.py,sha256=nmWrsxm7ZlucB5tGDHkkCQEUDsdaDedru-0NUYkARlo,38609
|
|
66
|
-
pygpt_net/controller/ctx/extra.py,sha256=
|
|
66
|
+
pygpt_net/controller/ctx/extra.py,sha256=WApWjnIfl3SoI0VZVbptvjjqhFPJl-dSfqW12tlBHrY,8599
|
|
67
67
|
pygpt_net/controller/ctx/summarizer.py,sha256=dO-LqIclwI7gIot1yjNo9eZ0HxakWCSoqePORgCLOk8,3072
|
|
68
68
|
pygpt_net/controller/debug/__init__.py,sha256=dOJGTICjvTtrPIEDOsxCzcOHsfu8AFPLpSKbdN0q0KI,509
|
|
69
69
|
pygpt_net/controller/debug/debug.py,sha256=NGYj3e-OJ07h9weyw51sHe-txnWNWaSDGI5ofZe202U,8821
|
|
@@ -82,7 +82,7 @@ pygpt_net/controller/idx/idx.py,sha256=h8k8vGaPEl2ikA0vMrOT7RGY3XrhEpNjeHHxEUBLq
|
|
|
82
82
|
pygpt_net/controller/idx/indexer.py,sha256=xW4ruWNpoGnyfFMTU2wOfuk63RRWBFgZGKIqiDlQoA8,22347
|
|
83
83
|
pygpt_net/controller/idx/settings.py,sha256=orWd8ARxIBBs3MWJLjEKcqmrXLi6DvsLitsPvPd2fXU,7916
|
|
84
84
|
pygpt_net/controller/kernel/__init__.py,sha256=XVS7uaC0VZZkpjqkbs6pqSwXdAZWLnR3dVNCHmF7XH4,510
|
|
85
|
-
pygpt_net/controller/kernel/kernel.py,sha256=
|
|
85
|
+
pygpt_net/controller/kernel/kernel.py,sha256=XP7ymz72dxiOMdigwR3uYyf6V1NAoW9KNxhHKBKKqO8,12999
|
|
86
86
|
pygpt_net/controller/kernel/reply.py,sha256=eGt5bM7tUFXsXKm0XxNaGzUx145G9Ew_6B88W_SX_C8,6287
|
|
87
87
|
pygpt_net/controller/kernel/stack.py,sha256=HJh7jkLEYQxUHJigbjD7sJh3FvvqUTCevqGf3doc29I,4046
|
|
88
88
|
pygpt_net/controller/lang/__init__.py,sha256=Bpr9Ygoi_vd_CnfbQiZzYAeg9KV0vqNIuMl8EWmHKaM,508
|
|
@@ -168,9 +168,9 @@ pygpt_net/core/attachments/attachments.py,sha256=bUqvfPqlpdXiGf3GvS1kTE45A0Q1Eo3
|
|
|
168
168
|
pygpt_net/core/attachments/context.py,sha256=S67zDh4lD9_stmeeh1ZGNv3IMt_6_IMGxq-3Jqn9kMs,25119
|
|
169
169
|
pygpt_net/core/attachments/worker.py,sha256=Yt6DU2Yr7-FPTPTdlj8vDzT46pcKcvZoatiZybY6YEo,1685
|
|
170
170
|
pygpt_net/core/audio/__init__.py,sha256=SNShKpjqXzLhaSKxWiM8b6lasHRkrdSZ8ck-X7RJ-VY,509
|
|
171
|
-
pygpt_net/core/audio/audio.py,sha256=
|
|
171
|
+
pygpt_net/core/audio/audio.py,sha256=_WaS1JHT9S3BqmzEc7bOcWwMfth6ueMJQVs13jzoy4c,7709
|
|
172
172
|
pygpt_net/core/audio/backend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
173
|
-
pygpt_net/core/audio/backend/native.py,sha256=
|
|
173
|
+
pygpt_net/core/audio/backend/native.py,sha256=HfkuK281CByBGQgxv343mRC-pyyxWfZrhYfcYTTEYOg,22117
|
|
174
174
|
pygpt_net/core/audio/backend/pyaudio.py,sha256=iVpkr-82OldSY_9Zar9U7hMiiBcy9kZso4h8Xcvo9pA,18487
|
|
175
175
|
pygpt_net/core/audio/backend/pygame.py,sha256=mFNzv3ADcjxcTkHpM08Xdp3ZmxYHrnZPdKvFs6gWoSk,16814
|
|
176
176
|
pygpt_net/core/audio/capture.py,sha256=Ka2jsNEzhAMqF1vmF9wqYHxT20r1XY9_8f44HcwQ0CE,3797
|
|
@@ -340,8 +340,8 @@ pygpt_net/css_rc.py,sha256=i13kX7irhbYCWZ5yJbcMmnkFp_UfS4PYnvRFSPF7XXo,11349
|
|
|
340
340
|
pygpt_net/data/audio/click_off.mp3,sha256=aNiRDP1pt-Jy7ija4YKCNFBwvGWbzU460F4pZWZDS90,65201
|
|
341
341
|
pygpt_net/data/audio/click_on.mp3,sha256=qfdsSnthAEHVXzeyN4LlC0OvXuyW8p7stb7VXtlvZ1k,65201
|
|
342
342
|
pygpt_net/data/audio/ok.mp3,sha256=LTiV32pEBkpUGBkKkcOdOFB7Eyt_QoP2Nv6c5AaXftk,32256
|
|
343
|
-
pygpt_net/data/config/config.json,sha256
|
|
344
|
-
pygpt_net/data/config/models.json,sha256=
|
|
343
|
+
pygpt_net/data/config/config.json,sha256=-lndyyNRWW_Ahdr3KSmaehRc6f7kgJ2FSELQ7IMtNx4,24750
|
|
344
|
+
pygpt_net/data/config/models.json,sha256=avI6jlTtphXMgA7U29aIY25srKvttoTC_Fzw3InCqwM,109127
|
|
345
345
|
pygpt_net/data/config/modes.json,sha256=M882iiqX_R2sNQl9cqZ3k-uneEvO9wpARtHRMLx_LHw,2265
|
|
346
346
|
pygpt_net/data/config/presets/agent_code_act.json,sha256=GYHqhxtKFLUCvRI3IJAJ7Qe1k8yD9wGGNwManldWzlI,754
|
|
347
347
|
pygpt_net/data/config/presets/agent_openai.json,sha256=vMTR-soRBiEZrpJJHuFLWyx8a3Ez_BqtqjyXgxCAM_Q,733
|
|
@@ -374,7 +374,7 @@ pygpt_net/data/config/presets/current.vision.json,sha256=x1ll5B3ROSKYQA6l27PRGXU
pygpt_net/data/config/presets/dalle_white_cat.json,sha256=esqUb43cqY8dAo7B5u99tRC0MBV5lmlrVLnJhTSkL8w,552
pygpt_net/data/config/presets/joke_agent.json,sha256=R6n9P7KRb0s-vZWZE7kHdlOfXAx1yYrPmUw8uLyw8OE,474
pygpt_net/data/config/presets/joke_expert.json,sha256=jjcoIYEOaEp8kLoIbecxQROiq4J3Zess5w8_HmngPOY,671
-pygpt_net/data/config/settings.json,sha256=
+pygpt_net/data/config/settings.json,sha256=9QTRQDJMijDtQPBYeXB5MDjMFMbdXOKsNFU820i6QS0,64821
pygpt_net/data/config/settings_section.json,sha256=Ng6kgmgxVmvt-KYFIqZvIDAEK4DfISNjNVF55DFWNjs,1082
pygpt_net/data/css/fix_windows.css,sha256=Mks14Vg25ncbMqZJfAMStrhvZmgHF6kU75ohTWRZeI8,664
pygpt_net/data/css/fix_windows.dark.css,sha256=7hGbT_qI5tphYC_WlFpJRDAcmjBb0AQ2Yc-y-_Zzf2M,161
@@ -1597,14 +1597,14 @@ pygpt_net/data/js/katex/fonts/KaTeX_Typewriter-Regular.woff,sha256=4U_tArGrp86fW
pygpt_net/data/js/katex/fonts/KaTeX_Typewriter-Regular.woff2,sha256=cdUX1ngneHz6vfGGkUzDNY7aU543kxlB8rL9SiH2jAs,13568
pygpt_net/data/js/katex/katex.min.css,sha256=lVaKnUaQNG4pI71WHffQZVALLQF4LMZEk4nOia8U9ow,23532
pygpt_net/data/js/katex/katex.min.js,sha256=KLASOtKS2x8pUxWVzCDmlWJ4jhuLb0vtrgakbD6gDDo,276757
-pygpt_net/data/locale/locale.de.ini,sha256=
-pygpt_net/data/locale/locale.en.ini,sha256=
-pygpt_net/data/locale/locale.es.ini,sha256=
-pygpt_net/data/locale/locale.fr.ini,sha256=
-pygpt_net/data/locale/locale.it.ini,sha256=
-pygpt_net/data/locale/locale.pl.ini,sha256=
-pygpt_net/data/locale/locale.uk.ini,sha256=
-pygpt_net/data/locale/locale.zh.ini,sha256=
+pygpt_net/data/locale/locale.de.ini,sha256=ZpYsSFQk-0-XHUGjJSjOd8g6Y49MbJYo89xmulHwg4A,95311
+pygpt_net/data/locale/locale.en.ini,sha256=QWHYX0HTzRua3HXDHi7uyCFLv1M9xBd8QOG7SS8r9bU,86457
+pygpt_net/data/locale/locale.es.ini,sha256=JTh5fcDTBd4l3RYe2PuiW9sRniq-Nux6oHPD3dfTGfo,96064
+pygpt_net/data/locale/locale.fr.ini,sha256=gOpc07xXNOzkKgMEmhCYnU4Zef4FOuDXBNZF920FKME,98511
+pygpt_net/data/locale/locale.it.ini,sha256=eU5uJ8rAvPB8VWTaWmKosO6CK028pN4MXrDaS5l93Sk,93890
+pygpt_net/data/locale/locale.pl.ini,sha256=1OoPSQnak3PTEndaxnmNHQeyzdNEarbz5gxwM-GIJmY,93640
+pygpt_net/data/locale/locale.uk.ini,sha256=FIbXUrfl6yuMf77mS5hz5wG60gB31CdQA6z7KA_yuXk,130136
+pygpt_net/data/locale/locale.zh.ini,sha256=Ns_BD1pj7nAmEzuSJiskHsrGOg3UdNw3DOr-s7ZGv3I,83809
pygpt_net/data/locale/plugin.agent.de.ini,sha256=BY28KpfFvgfVYJzcw2o5ScWnR4uuErIYGyc3NVHlmTw,1714
pygpt_net/data/locale/plugin.agent.en.ini,sha256=HwOWCI7e8uzlIgyRWRVyr1x6Xzs8Xjv5pfEc7jfLOo4,1728
pygpt_net/data/locale/plugin.agent.es.ini,sha256=bqaJQne8HPKFVtZ8Ukzo1TSqVW41yhYbGUqW3j2x1p8,1680
@@ -1829,7 +1829,7 @@ pygpt_net/plugin/audio_input/simple.py,sha256=rSqczTMzNbmMNNWGbt9j2M837szWRJpa_1
pygpt_net/plugin/audio_input/worker.py,sha256=-VmSUfsFl--95G9aHKOm9LqDw6F9-8HHS7ckAzcBW6A,12157
pygpt_net/plugin/audio_output/__init__.py,sha256=UglI8YPtzF_-buENrR0vqDuvzlK3CJdIXKx-iaJozZM,510
pygpt_net/plugin/audio_output/config.py,sha256=IA2K-9fQMZSwYGyi30Uh5qAlYwuqwaHo3dtDJ13vQdo,1208
-pygpt_net/plugin/audio_output/plugin.py,sha256=
+pygpt_net/plugin/audio_output/plugin.py,sha256=tncu3gPVVYZYF173ggnytuEh1AKDnUMUAlPDvZvu41Y,10276
pygpt_net/plugin/audio_output/worker.py,sha256=XhkY0uYlx1UIuAeWB3CA9MLvvDxI870E0iKJ0O2Lx10,3718
pygpt_net/plugin/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
pygpt_net/plugin/base/config.py,sha256=q5WAcF-h3KZH4bJFYANasM7UmV1v1c43fF1EZ05iF7Y,848
@@ -1950,10 +1950,10 @@ pygpt_net/provider/audio_input/openai_whisper.py,sha256=QABdVR4cC0VxKGLQyMJNnG6w
pygpt_net/provider/audio_input/openai_whisper_local.py,sha256=fkz8zvTgscJeNJ6Hkr0mS0IHE3tOTRqaZ7bgpOxV3p8,3129
pygpt_net/provider/audio_output/__init__.py,sha256=lOkgAiuNUqkAl_QrIG3ZsUznIZeJYtokgzEnDB8gRic,488
pygpt_net/provider/audio_output/base.py,sha256=1z3vuwUYOOzUD24GNZN_CyeO_VOhFUA35adpXAnXPHk,2002
-pygpt_net/provider/audio_output/eleven_labs.py,sha256=
-pygpt_net/provider/audio_output/google_tts.py,sha256=
-pygpt_net/provider/audio_output/ms_azure_tts.py,sha256
-pygpt_net/provider/audio_output/openai_tts.py,sha256=
+pygpt_net/provider/audio_output/eleven_labs.py,sha256=XN6-iZYOowbiWLNK9oBWW2DxT4DPWj6AQhA7P6xI1bM,4236
+pygpt_net/provider/audio_output/google_tts.py,sha256=4VMNBR4O8srUNTkdjuQn8jHmOdFV3XZIbrmMOdmAtAo,4291
+pygpt_net/provider/audio_output/ms_azure_tts.py,sha256=-8YlUwzUruR1cX6N5EZZfZSLOlOGbCvyHXTDwDD9GAg,4965
+pygpt_net/provider/audio_output/openai_tts.py,sha256=RauqWXxJ-aHc4by5462w0yOpZv5Ta1a9KhoS-8sMEqo,3010
pygpt_net/provider/core/__init__.py,sha256=YHQ86AUqjoP79N5vRqOmK-6FUDLQhCl_0y6Z4Tun3dU,488
pygpt_net/provider/core/assistant/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
pygpt_net/provider/core/assistant/base.py,sha256=zFJs4LxnBvUBri9-FgOYjv1wyfDFEzEu9H01GcTYL0s,1251
@@ -1984,8 +1984,8 @@ pygpt_net/provider/core/calendar/db_sqlite/provider.py,sha256=Q78xHqxIhnjukQmLIS
pygpt_net/provider/core/calendar/db_sqlite/storage.py,sha256=QDclQCQdr4QyRIqjgGXHBdHLbikJGqNhLlsEopMInDc,11204
pygpt_net/provider/core/config/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
pygpt_net/provider/core/config/base.py,sha256=cbvzbMNqL2XgC-36gGubnU37t94AX7LEw0lecb2Nm80,1365
-pygpt_net/provider/core/config/json_file.py,sha256=
-pygpt_net/provider/core/config/patch.py,sha256=
+pygpt_net/provider/core/config/json_file.py,sha256=GCcpCRQnBiSLWwlGbG9T3ZgiHkTfp5Jsg2KYkZcakBw,6789
+pygpt_net/provider/core/config/patch.py,sha256=xpTeLYRky7k77RtWD7XOhsEbWSoHKWXEps9FSjlDC7k,115715
pygpt_net/provider/core/ctx/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
pygpt_net/provider/core/ctx/base.py,sha256=Tfb4MDNe9BXXPU3lbzpdYwJF9S1oa2-mzgu5XT4It9g,3003
pygpt_net/provider/core/ctx/db_sqlite/__init__.py,sha256=0dP8VhI4bnFsQQKxAkaleKFlyaMycDD_cnE7gBCa57Y,512
@@ -2014,7 +2014,7 @@ pygpt_net/provider/core/mode/patch.py,sha256=VS2KCYW05jxLd-lcStNY1k4fHKUUrVVLTdR
pygpt_net/provider/core/model/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
pygpt_net/provider/core/model/base.py,sha256=L1x2rHha8a8hnCUYxZr88utay1EWEx5qBXW_2acpAN0,1319
pygpt_net/provider/core/model/json_file.py,sha256=l74l_n5PEHNp-FsoHtO9LHflz3RFKwDwKwOKN0stgZw,8418
-pygpt_net/provider/core/model/patch.py,sha256=
+pygpt_net/provider/core/model/patch.py,sha256=O_i0f0ixxObgMaZOZwBwF6hzRmZeujKHP07qBkTpeGQ,33661
pygpt_net/provider/core/notepad/__init__.py,sha256=jQQgG9u_ZLsZWXustoc1uvC-abUvj4RBKPAM30-f2Kc,488
pygpt_net/provider/core/notepad/base.py,sha256=7aPhild8cALTaN3JEbI0YrkIW1DRIycGQWTfsdH6WcQ,1323
pygpt_net/provider/core/notepad/db_sqlite/__init__.py,sha256=0dP8VhI4bnFsQQKxAkaleKFlyaMycDD_cnE7gBCa57Y,512
@@ -2379,8 +2379,8 @@ pygpt_net/ui/widget/textarea/web.py,sha256=7VTiA1LwWSUmbJ7r1uhKV-RFndOfNC_r_EAJi
pygpt_net/ui/widget/vision/__init__.py,sha256=8HT4tQFqQogEEpGYTv2RplKBthlsFKcl5egnv4lzzEw,488
pygpt_net/ui/widget/vision/camera.py,sha256=T8b5cmK6uhf_WSSxzPt_Qod8JgMnst6q8sQqRvgQiSA,2584
pygpt_net/utils.py,sha256=mQ_9Esbs2htAw82tPG5rzST8IZwXFwO2eLPsHRTCMJ4,6936
-pygpt_net-2.5.
-pygpt_net-2.5.
-pygpt_net-2.5.
-pygpt_net-2.5.
-pygpt_net-2.5.
+pygpt_net-2.5.92.dist-info/LICENSE,sha256=rbPqNB_xxANH8hKayJyIcTwD4bj4Y2G-Mcm85r1OImM,1126
+pygpt_net-2.5.92.dist-info/METADATA,sha256=cVNv0QesYPCO_rSvTPxFcQXMVw33rAayvRC2Tt8lqoo,182962
+pygpt_net-2.5.92.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+pygpt_net-2.5.92.dist-info/entry_points.txt,sha256=qvpII6UHIt8XfokmQWnCYQrTgty8FeJ9hJvOuUFCN-8,43
+pygpt_net-2.5.92.dist-info/RECORD,,
File without changes
File without changes
File without changes
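Note on the RECORD entries above: each line follows the standard wheel layout "path,sha256=<digest>,<size in bytes>", where the digest is the urlsafe-base64 encoding of the file's SHA-256 hash with the trailing "=" padding stripped, and the entry for RECORD itself is listed with empty hash and size fields. The Python sketch below is not part of the package; the helper names and paths are illustrative only. It shows how such an entry could be recomputed against an unpacked wheel, for example to confirm the new digest recorded here for pygpt_net/data/config/models.json.

# Minimal verification sketch (assumption: the wheel has been unpacked under "root").
# Per the wheel spec, the hash column is "sha256=" plus the urlsafe-base64 SHA-256
# digest with "=" padding removed; the RECORD entry itself carries empty fields.
import base64
import csv
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    # Compute the RECORD-style digest string for a file on disk.
    digest = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

def verify_record(record_file: Path, root: Path) -> list[str]:
    # Return the paths whose on-disk digest no longer matches RECORD.
    mismatched = []
    with record_file.open(newline="") as fh:
        for name, hash_field, _size in csv.reader(fh):
            if not hash_field:  # skip the RECORD entry itself (empty hash/size)
                continue
            algo, _, expected = hash_field.partition("=")
            if algo == "sha256" and record_digest(root / name) != expected:
                mismatched.append(name)
    return mismatched

# Hypothetical usage against an unpacked 2.5.92 wheel:
# verify_record(Path("pygpt_net-2.5.92.dist-info/RECORD"), Path("."))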