pygpt-net 2.6.63__py3-none-any.whl → 2.6.65__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +16 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +3 -1
- pygpt_net/controller/attachment/attachment.py +17 -8
- pygpt_net/controller/camera/camera.py +4 -4
- pygpt_net/controller/files/files.py +71 -2
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/presets/editor.py +137 -22
- pygpt_net/controller/ui/mode.py +18 -3
- pygpt_net/core/agents/custom/__init__.py +18 -2
- pygpt_net/core/agents/custom/runner.py +2 -2
- pygpt_net/core/attachments/clipboard.py +146 -0
- pygpt_net/core/render/web/renderer.py +44 -11
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_coder.json +15 -1
- pygpt_net/data/css/style.dark.css +12 -0
- pygpt_net/data/css/style.light.css +12 -0
- pygpt_net/data/icons/pin2.svg +1 -0
- pygpt_net/data/icons/pin3.svg +3 -0
- pygpt_net/data/icons/point.svg +1 -0
- pygpt_net/data/icons/target.svg +1 -0
- pygpt_net/data/js/app/runtime.js +11 -4
- pygpt_net/data/js/app/scroll.js +14 -0
- pygpt_net/data/js/app/ui.js +19 -2
- pygpt_net/data/js/app/user.js +22 -54
- pygpt_net/data/js/app.min.js +13 -14
- pygpt_net/data/locale/locale.de.ini +32 -0
- pygpt_net/data/locale/locale.en.ini +38 -2
- pygpt_net/data/locale/locale.es.ini +32 -0
- pygpt_net/data/locale/locale.fr.ini +32 -0
- pygpt_net/data/locale/locale.it.ini +32 -0
- pygpt_net/data/locale/locale.pl.ini +34 -2
- pygpt_net/data/locale/locale.uk.ini +32 -0
- pygpt_net/data/locale/locale.zh.ini +32 -0
- pygpt_net/icons.qrc +4 -0
- pygpt_net/icons_rc.py +274 -137
- pygpt_net/js_rc.py +8262 -8230
- pygpt_net/provider/agents/llama_index/planner_workflow.py +15 -3
- pygpt_net/provider/agents/llama_index/workflow/planner.py +69 -41
- pygpt_net/provider/agents/openai/agent_planner.py +57 -35
- pygpt_net/provider/agents/openai/evolve.py +0 -3
- pygpt_net/provider/api/google/__init__.py +9 -3
- pygpt_net/provider/api/google/image.py +11 -1
- pygpt_net/provider/api/google/music.py +375 -0
- pygpt_net/provider/core/config/patch.py +8 -0
- pygpt_net/ui/__init__.py +6 -1
- pygpt_net/ui/dialog/preset.py +9 -4
- pygpt_net/ui/layout/chat/attachments.py +18 -1
- pygpt_net/ui/layout/status.py +3 -3
- pygpt_net/ui/widget/element/status.py +55 -0
- pygpt_net/ui/widget/filesystem/explorer.py +116 -2
- pygpt_net/ui/widget/lists/context.py +26 -16
- pygpt_net/ui/widget/option/combo.py +149 -11
- pygpt_net/ui/widget/textarea/input.py +71 -17
- pygpt_net/ui/widget/textarea/web.py +1 -1
- pygpt_net/ui/widget/vision/camera.py +135 -12
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.65.dist-info}/METADATA +18 -2
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.65.dist-info}/RECORD +62 -55
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.65.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.65.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.63.dist-info → pygpt_net-2.6.65.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/api/google/music.py
ADDED
@@ -0,0 +1,375 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.27 09:30:00 #
+# ================================================== #
+
+import base64
+import datetime
+import json
+import os
+import shutil
+import subprocess
+from typing import Optional, Dict, Any, List
+
+import requests
+from PySide6.QtCore import QObject, Signal, QRunnable, Slot
+from google import genai
+
+from pygpt_net.core.events import KernelEvent
+from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.item.ctx import CtxItem
+from pygpt_net.utils import trans
+
+
+class Music:
+    """
+    Text-to-music generation for Google (Lyria) in the "image" mode.
+
+    This class encapsulates the whole music generation flow using Vertex AI Lyria.
+    It mirrors the architecture used for images and videos (worker + signals),
+    and always persists the WAV returned by the API, with optional ffmpeg transcoding to MP3 (the default output format) or MP4.
+    """
+
+    def __init__(self, window=None):
+        self.window = window
+        self.worker: Optional[MusicWorker] = None
+
+    def is_music_model(self, model_id: str) -> bool:
+        """
+        Heuristic check whether selected model is a music model (Lyria family).
+
+        :param model_id: model id
+        :return: True if Lyria/music model, False otherwise
+        """
+        mid = (model_id or "").lower()
+        return "lyria" in mid or "music" in mid
+
+    def generate(
+            self,
+            context: BridgeContext,
+            extra: Optional[Dict[str, Any]] = None,
+            sync: bool = True
+    ) -> bool:
+        """
+        Generate music using Vertex AI Lyria.
+
+        :param context: BridgeContext with prompt, model, attachments (ignored)
+        :param extra: extra parameters:
+            - num: int, number of samples (maps to sample_count if no seed)
+            - seed: int, generation seed (mutually exclusive with sample_count)
+            - negative_prompt: str, negative prompt to exclude elements
+            - format: str, 'mp3' (default), 'wav', or 'mp4'
+            - inline: bool, inline mode
+        :param sync: run synchronously (blocking) if True
+        :return: True if started
+        """
+        extra = extra or {}
+        ctx = context.ctx or CtxItem()
+        model = context.model
+        prompt = context.prompt or ""
+        inline = bool(extra.get("inline", False))
+
+        worker = MusicWorker()
+        worker.window = self.window
+        worker.client = self.window.core.api.google.get_client()
+        worker.ctx = ctx
+
+        # config
+        worker.model = (model.id if model else "lyria-002")  # Lyria model id or resource
+        worker.input_prompt = prompt
+        worker.negative_prompt = extra.get("negative_prompt") or None
+
+        # sample_count vs seed (mutually exclusive)
+        worker.num = int(extra.get("num", 1))
+        seed = extra.get("seed")
+        worker.seed = int(seed) if seed not in (None, "") else None
+
+        # preferred output format
+        worker.out_format = str(extra.get("format") or self._default_format()).lower()
+
+        # optional prompt improvement
+        prompt_model = self.window.core.models.from_defaults()
+        tmp = self.window.core.config.get('music.prompt_model') or self.window.core.config.get('video.prompt_model')
+        if self.window.core.models.has(tmp):
+            prompt_model = self.window.core.models.get(tmp)
+        worker.model_prompt = prompt_model
+        worker.system_prompt = self.window.core.prompt.get('music') or self.window.core.prompt.get('video')
+        worker.raw = bool(self.window.core.config.get('img_raw'))
+
+        worker.inline = inline
+
+        self.worker = worker
+        # Reuse video handlers for UX consistency (status/messages/download area)
+        self.worker.signals.finished.connect(self.window.core.video.handle_finished)
+        self.worker.signals.finished_inline.connect(self.window.core.video.handle_finished_inline)
+        self.worker.signals.status.connect(self.window.core.video.handle_status)
+        self.worker.signals.error.connect(self.window.core.video.handle_error)
+
+        if sync or not self.window.controller.kernel.async_allowed(ctx):
+            self.worker.run()
+            return True
+
+        # Use video busy state for unified UX
+        self.window.dispatch(KernelEvent(KernelEvent.STATE_BUSY, {"id": "video"}))
+        self.window.threadpool.start(self.worker)
+        return True
+
+    def _default_format(self) -> str:
+        """
+        Determine default preferred output format for saved files.
+        """
+        # try config override
+        try:
+            fmt = self.window.core.config.get('music.format')
+            if isinstance(fmt, str) and fmt.strip():
+                return fmt.strip()
+        except Exception:
+            pass
+        # default to mp3 as most interoperable
+        return "mp3"
+
+
+class MusicSignals(QObject):
+    finished = Signal(object, list, str)  # ctx, paths, prompt
+    finished_inline = Signal(object, list, str)  # ctx, paths, prompt
+    status = Signal(object)  # message
+    error = Signal(object)  # exception
+
+
+class MusicWorker(QRunnable):
+    """
+    Worker that calls the Vertex AI Lyria REST API (predict) to generate audio,
+    then saves to disk. Transcodes to MP3/MP4 if ffmpeg is available and requested.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+        self.signals = MusicSignals()
+        self.window = None
+        self.client: Optional[genai.Client] = None
+        self.ctx: Optional[CtxItem] = None
+
+        # inputs
+        self.model: str = "lyria-002"
+        self.input_prompt: str = ""
+        self.negative_prompt: Optional[str] = None
+        self.num: int = 1
+        self.seed: Optional[int] = None
+        self.out_format: str = "mp3"  # wav | mp3 | mp4
+
+        # prompt improvement
+        self.model_prompt = None
+        self.system_prompt = ""
+        self.raw = False
+
+        # ui
+        self.inline: bool = False
+
+    @Slot()
+    def run(self):
+        try:
+            # Validate Vertex configuration first
+            if not self._using_vertex():
+                raise RuntimeError(
+                    "Vertex AI is required for music (Lyria). Enable Vertex in settings and configure credentials."
+                )
+
+            # Optional prompt enhancement via LLM
+            if not self.raw and self.input_prompt:
+                try:
+                    self.signals.status.emit(trans('vid.status.prompt.wait'))
+                    bridge_context = BridgeContext(
+                        prompt=self.input_prompt,
+                        system_prompt=self.system_prompt,
+                        model=self.model_prompt,
+                        max_tokens=200,
+                        temperature=1.0,
+                    )
+                    ev = KernelEvent(KernelEvent.CALL, {'context': bridge_context, 'extra': {}})
+                    self.window.dispatch(ev)
+                    resp = ev.data.get('response')
+                    if resp:
+                        self.input_prompt = resp
+                except Exception as e:
+                    # non-fatal
+                    self.signals.error.emit(e)
+                    self.signals.status.emit(trans('vid.status.prompt.error') + ": " + str(e))
+
+            # Build request
+            project = os.getenv("GOOGLE_CLOUD_PROJECT", "")
+            location = os.getenv("GOOGLE_CLOUD_LOCATION", "us-central1")
+            model_id = self._normalize_model_id(self.model)
+
+            url = f"https://{location}-aiplatform.googleapis.com/v1/projects/{project}/locations/{location}/publishers/google/models/{model_id}:predict"
+            token = self._get_access_token()
+
+            headers = {
+                "Authorization": f"Bearer {token}",
+                "Content-Type": "application/json",
+            }
+
+            # sample_count vs seed (mutually exclusive per Lyria API)
+            instances: Dict[str, Any] = {
+                "prompt": self.input_prompt,
+            }
+            if self.negative_prompt:
+                instances["negative_prompt"] = self.negative_prompt
+
+            params: Dict[str, Any] = {}
+            if self.seed is not None and self.num > 1:
+                # Keep API valid: if seed is set, do not set sample_count
+                self.signals.status.emit("Seed provided; generating a single seeded sample (sample_count ignored).")
+            if self.seed is not None:
+                instances["seed"] = int(self.seed)
+            elif self.num > 1:
+                params["sample_count"] = int(self.num)
+
+            body = {
+                "instances": [instances],
+                "parameters": params,
+            }
+
+            self.signals.status.emit(trans('vid.status.generating') + f": {self.input_prompt}...")
+
+            # Call REST API
+            resp = requests.post(url, headers=headers, data=json.dumps(body), timeout=120)
+            if resp.status_code != 200:
+                raise RuntimeError(f"Lyria API error: {resp.status_code} {resp.text}")
+
+            data = resp.json() or {}
+            preds = data.get("predictions") or []
+            if not preds:
+                raise RuntimeError("No audio predictions returned.")
+
+            # Decode and save each audio clip
+            paths: List[str] = []
+            for idx, p in enumerate(preds):
+                # API returns base64-encoded WAV in audioContent
+                b64 = p.get("audioContent") or ""
+                mime = p.get("mimeType") or "audio/wav"
+                if not b64:
+                    continue
+                wav_bytes = base64.b64decode(b64)
+
+                saved_path = self._save(idx, wav_bytes, mime)
+                if saved_path:
+                    paths.append(saved_path)
+
+            if self.inline:
+                self.signals.finished_inline.emit(self.ctx, paths, self.input_prompt)
+            else:
+                self.signals.finished.emit(self.ctx, paths, self.input_prompt)
+
+        except Exception as e:
+            self.signals.error.emit(e)
+        finally:
+            self._cleanup()
+
+    # ---------- helpers ----------
+
+    def _using_vertex(self) -> bool:
+        """Check if Vertex AI is active via env variable set by ApiGoogle.setup_env()."""
+        val = os.getenv("GOOGLE_GENAI_USE_VERTEXAI") or ""
+        return str(val).lower() in ("1", "true", "yes", "y")
+
+    def _normalize_model_id(self, model_id: str) -> str:
+        """
+        Normalize model id to the publisher form 'lyria-002'.
+        Accepts inputs like 'lyria-002', 'models/lyria-002', or full resource names.
+        """
+        if not model_id:
+            return "lyria-002"
+        # get trailing token after '/'
+        tail = model_id.split("/")[-1]
+        if tail.startswith("lyria"):
+            return tail
+        # fallback
+        return "lyria-002"
+
+    def _get_access_token(self) -> str:
+        """
+        Obtain OAuth2 access token for Vertex AI (cloud-platform scope) using ADC.
+        """
+        try:
+            import google.auth
+            from google.auth.transport.requests import Request as GRequest
+            creds, _ = google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
+            creds.refresh(GRequest())
+            return creds.token
+        except Exception as e:
+            # As a fallback, allow passing a pre-fetched token via env var.
+            token = os.getenv("VERTEX_AI_TOKEN") or os.getenv("GCP_ACCESS_TOKEN") or ""
+            if token:
+                return token
+            raise e
+
+    def _save(self, idx: int, wav_bytes: bytes, mime: str) -> Optional[str]:
+        """
+        Save audio to disk. Primary save as WAV; then optionally transcode to MP3/MP4 if requested.
+        """
+        # Build base filename
+        safe = self.window.core.video.make_safe_filename(self.input_prompt)
+        base_name = (
+            datetime.date.today().strftime("%Y-%m-%d") + "_" +
+            datetime.datetime.now().strftime("%H-%M-%S") + "-" +
+            safe + "-" + str(idx + 1)
+        )
+
+        # Directories: prefer 'audio' user dir if available, else fallback to 'video'
+        try:
+            out_dir = self.window.core.config.get_user_dir("audio")
+        except Exception:
+            out_dir = self.window.core.config.get_user_dir("video")
+
+        os.makedirs(out_dir, exist_ok=True)
+
+        # Always persist a WAV first (what API returns)
+        wav_path = os.path.join(out_dir, base_name + ".wav")
+        self.signals.status.emit(trans('vid.status.downloading') + f" ({idx + 1} / {max(1, self.num)}) -> {wav_path}")
+        with open(wav_path, "wb") as f:
+            f.write(wav_bytes)
+
+        # Transcode if needed
+        fmt = (self.out_format or "mp3").lower()
+        if fmt == "wav":
+            return wav_path
+
+        ffmpeg = shutil.which("ffmpeg") or shutil.which("ffmpeg.exe")
+        if not ffmpeg:
+            # No ffmpeg -> keep WAV
+            self.signals.status.emit("ffmpeg not found. Saved WAV output only.")
+            return wav_path
+
+        if fmt == "mp3":
+            out_path = os.path.join(out_dir, base_name + ".mp3")
+            cmd = [ffmpeg, "-y", "-i", wav_path, "-vn", "-codec:a", "libmp3lame", "-b:a", "192k", out_path]
+        elif fmt == "mp4":
+            out_path = os.path.join(out_dir, base_name + ".mp4")
+            # audio-only MP4 (AAC)
+            cmd = [ffmpeg, "-y", "-i", wav_path, "-vn", "-c:a", "aac", "-b:a", "192k", out_path]
+        else:
+            # Unknown format -> keep WAV
+            return wav_path
+
+        try:
+            subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
+            return out_path
+        except Exception:
+            # If transcoding fails, keep WAV
+            return wav_path
+
+    def _cleanup(self):
+        """Cleanup signals object."""
+        sig = self.signals
+        self.signals = None
+        if sig is not None:
+            try:
+                sig.deleteLater()
+            except RuntimeError:
+                pass
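A note on usage: the class above is wired into the app's bridge/worker architecture, so it is normally driven by the kernel rather than called directly. Still, a minimal sketch of a call site may help; it assumes a `window` object wired up as elsewhere in pygpt_net and the `lyria-002` model id from the code above (the call site itself is hypothetical):

    # Hypothetical call site; mirrors how the video/image flows drive their workers.
    from pygpt_net.core.bridge.context import BridgeContext
    from pygpt_net.provider.api.google.music import Music

    music = Music(window=window)  # 'window' assumed: app root with core/controller attached
    if music.is_music_model("lyria-002"):
        context = BridgeContext(
            prompt="calm piano with soft rain ambience",
            model=window.core.models.get("lyria-002"),  # assumes the model exists in models.json
        )
        # 'seed' and 'num' map to Lyria's mutually exclusive seed / sample_count,
        # so pass one or the other:
        music.generate(context, extra={"num": 2, "format": "mp3"}, sync=False)

Note that sync=False only takes effect when the kernel allows async execution for the context; otherwise run() executes inline, exactly as in generate() above.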
pygpt_net/provider/core/config/patch.py
CHANGED
@@ -168,6 +168,14 @@ class Patch:
             patch_css('style.dark.css', True)
             updated = True
 
+        # < 2.6.65
+        if old < parse_version("2.6.65"):
+            print("Migrating config from < 2.6.65...")
+            # add: status bar css
+            patch_css('style.light.css', True)
+            patch_css('style.dark.css', True)
+            updated = True
+
         # update file
         migrated = False
         if updated:
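The new block follows the same version-gate idiom as the earlier migrations: the previously stored version is compared against the target release with packaging's parser, and the CSS patch is re-applied once. A standalone sketch of the idiom (values are illustrative):

    # Illustrative version-gate idiom, as used by the Patch class above.
    from packaging.version import parse as parse_version

    old = parse_version("2.6.63")  # version previously stored in the user's config
    if old < parse_version("2.6.65"):
        # one-time migration steps for configs older than 2.6.65
        print("Migrating config from < 2.6.65...")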
pygpt_net/ui/__init__.py
CHANGED
@@ -113,6 +113,8 @@ class UI:
         """Set default sizes"""
         def set_initial_splitter_height():
             """Set initial splitter height"""
+            if 'main.output' not in self.window.ui.splitters:
+                return
             total_height = self.window.ui.splitters['main.output'].size().height()
             if total_height > 0:
                 size_output = int(total_height * 0.9)
@@ -124,6 +126,8 @@ class UI:
 
         def set_initial_splitter_width():
             """Set initial splitter width"""
+            if 'main' not in self.window.ui.splitters:
+                return
             total_width = self.window.ui.splitters['main'].size().width()
             if total_width > 0:
                 size_output = int(total_width * 0.75)
@@ -139,7 +143,8 @@ class UI:
         suffix = self.window.core.platforms.get_env_suffix()
         profile_name = self.window.core.config.profile.get_current_name()
         self.window.setWindowTitle(
-            f"PyGPT - Desktop AI Assistant {self.window.meta['version']} | build {self.window.meta['build'].replace('.', '-')}{suffix} ({profile_name})"
+            f"PyGPT - Desktop AI Assistant {self.window.meta['version']} | "
+            f"build {self.window.meta['build'].replace('.', '-')}{suffix} ({profile_name})"
        )
 
     def post_setup(self):
pygpt_net/ui/dialog/preset.py
CHANGED
@@ -6,13 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.28 08:00:00 #
 # ================================================== #
 
 from PySide6.QtCore import Qt
 from PySide6.QtGui import QIcon
 from PySide6.QtWidgets import QPushButton, QHBoxLayout, QLabel, QVBoxLayout, QSplitter, QWidget, QSizePolicy, \
-    QTabWidget, QFileDialog
+    QTabWidget, QFileDialog, QScrollArea, QFrame
 
 from pygpt_net.core.types import (
     MODE_AGENT,
@@ -169,7 +169,6 @@ class Preset(BaseConfigDialog):
         desc.setContentsMargins(0, 5, 0, 5)
         self.window.ui.nodes['preset.editor.description'] = desc
 
-
         # prompt + extra options
         prompt_layout = QVBoxLayout()
         prompt_layout.addWidget(widgets['prompt'])
@@ -334,8 +333,14 @@ class Preset(BaseConfigDialog):
         self.window.ui.nodes['preset.editor.extra'] = {}
 
         tabs = QTabWidget()
+
+        # Make the prompt tab scrollable to avoid vertical overlap in narrow layouts.
+        scroll_prompt = QScrollArea()
+        scroll_prompt.setWidget(prompt_widget)
+        scroll_prompt.setWidgetResizable(True)
+        scroll_prompt.setFrameShape(QFrame.NoFrame)
         tabs.addTab(
-            prompt_widget,
+            scroll_prompt,
             trans("preset.prompt"),
         )
         tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
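Wrapping the tab's content in a QScrollArea with setWidgetResizable(True) lets the prompt editor shrink below its preferred height without widgets overlapping. A self-contained sketch of the same pattern, independent of the dialog above:

    # Minimal PySide6 example of the scroll-wrapped tab pattern.
    import sys
    from PySide6.QtWidgets import (
        QApplication, QFrame, QLabel, QScrollArea, QTabWidget, QVBoxLayout, QWidget,
    )

    app = QApplication(sys.argv)

    content = QWidget()
    layout = QVBoxLayout(content)
    for i in range(50):
        layout.addWidget(QLabel(f"row {i}"))  # tall content that would otherwise clip

    scroll = QScrollArea()
    scroll.setWidget(content)
    scroll.setWidgetResizable(True)  # content tracks the viewport width
    scroll.setFrameShape(QFrame.NoFrame)  # blend into the tab, as in the dialog above

    tabs = QTabWidget()
    tabs.addTab(scroll, "Prompt")
    tabs.resize(320, 240)
    tabs.show()
    sys.exit(app.exec())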
pygpt_net/ui/layout/chat/attachments.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.09.28 08:00:00 #
 # ================================================== #
 
 import os
@@ -19,8 +19,10 @@ from PySide6.QtWidgets import QVBoxLayout, QPushButton, QHBoxLayout, QCheckBox,
 from pygpt_net.item.attachment import AttachmentItem
 from pygpt_net.ui.widget.element.labels import HelpLabel
 from pygpt_net.ui.widget.lists.attachment import AttachmentList
+from pygpt_net.core.attachments.clipboard import AttachmentDropHandler
 from pygpt_net.utils import trans
 
+
 class Attachments:
     def __init__(self, window=None):
         """
@@ -30,6 +32,8 @@ class Attachments:
         """
         self.window = window
         self.id = 'attachments'
+        # Keep a strong reference to DnD handler(s)
+        self._dnd_handlers = {}
 
     def setup(self) -> QVBoxLayout:
         """
@@ -132,6 +136,19 @@ class Attachments:
         self.window.ui.models[self.id] = self.create_model(self.window)
         self.window.ui.nodes[self.id].setModel(self.window.ui.models[self.id])
 
+        # Drag & Drop: allow dropping files/images/urls/text directly onto the list
+        try:
+            self._dnd_handlers[self.id] = AttachmentDropHandler(
+                self.window,
+                self.window.ui.nodes[self.id],
+                policy=AttachmentDropHandler.SWALLOW_ALL,
+            )
+        except Exception as e:
+            try:
+                self.window.core.debug.log(e)
+            except Exception:
+                pass
+
     def create_model(self, parent) -> QStandardItemModel:
         """
         Create list model
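The `_dnd_handlers` dict is not cosmetic: helper QObjects installed as event filters are not parented to the widget they serve, so without a strong Python-side reference they can be garbage-collected and drops silently stop working. `AttachmentDropHandler` itself lives in the new `pygpt_net/core/attachments/clipboard.py` (not shown in this diff); a generic sketch of the pattern it presumably follows:

    # Generic drop-filter sketch; DropFilter is an illustrative stand-in, not the real handler.
    import sys
    from PySide6.QtCore import QEvent, QObject
    from PySide6.QtWidgets import QApplication, QListView

    class DropFilter(QObject):
        def eventFilter(self, obj, event):
            if event.type() in (QEvent.DragEnter, QEvent.DragMove):
                event.acceptProposedAction()
                return True
            if event.type() == QEvent.Drop:
                print("dropped:", [u.toString() for u in event.mimeData().urls()])
                return True  # swallow the event, like the SWALLOW_ALL policy above
            return super().eventFilter(obj, event)

    app = QApplication(sys.argv)
    view = QListView()
    view.setAcceptDrops(True)
    handler = DropFilter()  # must stay referenced, hence self._dnd_handlers above
    view.installEventFilter(handler)
    view.show()
    sys.exit(app.exec())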
pygpt_net/ui/layout/status.py
CHANGED
@@ -14,6 +14,7 @@ from PySide6.QtWidgets import QLabel, QHBoxLayout, QSizePolicy, QPushButton
 
 from pygpt_net.ui.widget.element.labels import HelpLabel
 from pygpt_net.ui.widget.anims.loader import Loader
+from pygpt_net.ui.widget.element.status import BottomStatus
 from pygpt_net.utils import trans
 
 
@@ -34,8 +35,7 @@ class Status:
         """
         nodes = self.window.ui.nodes
 
-        nodes['status'] =
-        nodes['status'].setParent(self.window)
+        nodes['status'] = BottomStatus(window=self.window)
 
         nodes['status.agent'] = HelpLabel("")
         nodes['status.agent'].setParent(self.window)
@@ -53,7 +53,7 @@ class Status:
         layout = QHBoxLayout()
         layout.addWidget(nodes['anim.loading.status'])
         layout.addWidget(nodes['status.agent'])
-        layout.addWidget(nodes['status'])
+        layout.addWidget(nodes['status'].setup())
         layout.addWidget(nodes['global.stop'])
         layout.setAlignment(Qt.AlignLeft)
 
pygpt_net/ui/widget/element/status.py
ADDED
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.09.28 00:00:00 #
+# ================================================== #
+
+from PySide6.QtWidgets import QLabel, QHBoxLayout, QWidget
+
+from datetime import datetime
+from pygpt_net.utils import trans
+
+
+class BottomStatus:
+    def __init__(self, window=None):
+        self.window = window
+        self.timer = QLabel(parent=self.window)
+        self.timer.setObjectName("StatusBarTimer")
+        self.msg = QLabel(parent=self.window)
+        self.msg.setObjectName("StatusBarMessage")
+        self.set_text(trans('status.started'))
+
+    def set_text(self, text):
+        """Set status text"""
+        self.msg.setText(text)
+        if text:
+            now = datetime.now()
+            self.timer.setText(now.strftime("%H:%M"))
+        else:
+            self.timer.setText("")
+
+    def setText(self, text):
+        """Fallback for set_text method"""
+        self.set_text(text)
+
+    def text(self) -> str:
+        """Get status text"""
+        return self.msg.text()
+
+    def setup(self):
+        """Setup status bar widget"""
+        self.timer.setText("00:00")
+        layout = QHBoxLayout()
+        layout.setContentsMargins(0, 0, 0, 0)
+        layout.setSpacing(5)
+        layout.addWidget(self.timer)
+        layout.addWidget(self.msg)
+        layout.addStretch()
+        widget = QWidget(self.window)
+        widget.setLayout(layout)
+        return widget
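With this widget, the status bar composition in ui/layout/status.py (above) reduces to constructing BottomStatus once, adding the QWidget returned by setup() to the layout, and calling set_text() from existing call sites; the Qt-style setText() alias keeps old QLabel-based callers working. A short usage sketch, assuming a `window` object and layout as elsewhere in the UI code:

    # Hypothetical usage, mirroring ui/layout/status.py above.
    status = BottomStatus(window=window)
    layout.addWidget(status.setup())  # composite widget: [HH:MM] message

    status.set_text("Ready")  # updates the message and stamps the current time
    status.setText("Ready")   # alias kept for QLabel-style call sites
    print(status.text())      # -> "Ready"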