pygpt-net 2.6.62__py3-none-any.whl → 2.6.64__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygpt_net/CHANGELOG.txt +11 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/attachment/attachment.py +17 -8
- pygpt_net/controller/camera/camera.py +4 -4
- pygpt_net/controller/lang/custom.py +2 -2
- pygpt_net/controller/presets/editor.py +65 -1
- pygpt_net/controller/ui/mode.py +18 -3
- pygpt_net/core/agents/custom/llama_index/runner.py +15 -52
- pygpt_net/core/agents/custom/runner.py +194 -76
- pygpt_net/core/agents/runners/llama_workflow.py +60 -10
- pygpt_net/core/render/web/renderer.py +11 -0
- pygpt_net/data/config/config.json +3 -3
- pygpt_net/data/config/models.json +3 -3
- pygpt_net/data/config/presets/agent_openai_b2b.json +1 -15
- pygpt_net/data/config/presets/agent_openai_coder.json +0 -0
- pygpt_net/data/config/presets/agent_openai_evolve.json +1 -23
- pygpt_net/data/config/presets/agent_openai_planner.json +1 -21
- pygpt_net/data/config/presets/agent_openai_researcher.json +1 -21
- pygpt_net/data/config/presets/agent_openai_supervisor.json +1 -13
- pygpt_net/data/config/presets/agent_openai_writer.json +1 -15
- pygpt_net/data/config/presets/agent_supervisor.json +1 -11
- pygpt_net/data/js/app/runtime.js +10 -0
- pygpt_net/data/js/app/scroll.js +14 -0
- pygpt_net/data/js/app.min.js +6 -4
- pygpt_net/data/locale/locale.de.ini +32 -0
- pygpt_net/data/locale/locale.en.ini +37 -0
- pygpt_net/data/locale/locale.es.ini +32 -0
- pygpt_net/data/locale/locale.fr.ini +32 -0
- pygpt_net/data/locale/locale.it.ini +32 -0
- pygpt_net/data/locale/locale.pl.ini +34 -2
- pygpt_net/data/locale/locale.uk.ini +32 -0
- pygpt_net/data/locale/locale.zh.ini +32 -0
- pygpt_net/js_rc.py +7571 -7499
- pygpt_net/provider/agents/base.py +0 -0
- pygpt_net/provider/agents/llama_index/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/llama_index/planner_workflow.py +15 -3
- pygpt_net/provider/agents/llama_index/workflow/codeact.py +0 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +272 -44
- pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
- pygpt_net/provider/agents/openai/agent.py +0 -0
- pygpt_net/provider/agents/openai/agent_b2b.py +4 -4
- pygpt_net/provider/agents/openai/agent_planner.py +631 -254
- pygpt_net/provider/agents/openai/agent_with_experts.py +0 -0
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
- pygpt_net/provider/agents/openai/evolve.py +6 -9
- pygpt_net/provider/agents/openai/flow_from_schema.py +0 -0
- pygpt_net/provider/agents/openai/supervisor.py +290 -37
- pygpt_net/provider/api/google/__init__.py +9 -3
- pygpt_net/provider/api/google/image.py +11 -1
- pygpt_net/provider/api/google/music.py +375 -0
- pygpt_net/provider/api/x_ai/__init__.py +0 -0
- pygpt_net/provider/core/agent/__init__.py +0 -0
- pygpt_net/provider/core/agent/base.py +0 -0
- pygpt_net/provider/core/agent/json_file.py +0 -0
- pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -0
- pygpt_net/provider/llms/base.py +0 -0
- pygpt_net/provider/llms/deepseek_api.py +0 -0
- pygpt_net/provider/llms/google.py +0 -0
- pygpt_net/provider/llms/hugging_face_api.py +0 -0
- pygpt_net/provider/llms/hugging_face_router.py +0 -0
- pygpt_net/provider/llms/mistral.py +0 -0
- pygpt_net/provider/llms/perplexity.py +0 -0
- pygpt_net/provider/llms/x_ai.py +0 -0
- pygpt_net/ui/widget/dialog/confirm.py +34 -8
- pygpt_net/ui/widget/option/combo.py +149 -11
- pygpt_net/ui/widget/textarea/input.py +1 -1
- pygpt_net/ui/widget/textarea/web.py +1 -1
- pygpt_net/ui/widget/vision/camera.py +135 -12
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/METADATA +13 -2
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/RECORD +53 -52
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.62.dist-info → pygpt_net-2.6.64.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/api/google/music.py (new file)
@@ -0,0 +1,375 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package #
# Website: https://pygpt.net #
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2025.09.27 09:30:00 #
# ================================================== #

import base64
import datetime
import json
import os
import shutil
import subprocess
from typing import Optional, Dict, Any, List

import requests
from PySide6.QtCore import QObject, Signal, QRunnable, Slot
from google import genai

from pygpt_net.core.events import KernelEvent
from pygpt_net.core.bridge.context import BridgeContext
from pygpt_net.item.ctx import CtxItem
from pygpt_net.utils import trans


class Music:
    """
    Text-to-music generation for Google (Lyria) in the "image" mode.

    This class encapsulates the whole music generation flow using Vertex AI Lyria.
    It mirrors the architecture used for images and videos (worker + signals),
    and saves outputs to disk as WAV by default, with optional ffmpeg transcoding to MP3/MP4.
    """

    def __init__(self, window=None):
        self.window = window
        self.worker: Optional[MusicWorker] = None

    def is_music_model(self, model_id: str) -> bool:
        """
        Heuristic check whether selected model is a music model (Lyria family).

        :param model_id: model id
        :return: True if Lyria/music model, False otherwise
        """
        mid = (model_id or "").lower()
        return "lyria" in mid or "music" in mid

    def generate(
            self,
            context: BridgeContext,
            extra: Optional[Dict[str, Any]] = None,
            sync: bool = True
    ) -> bool:
        """
        Generate music using Vertex AI Lyria.

        :param context: BridgeContext with prompt, model, attachments (ignored)
        :param extra: extra parameters:
            - num: int, number of samples (maps to sample_count if no seed)
            - seed: int, generation seed (mutually exclusive with sample_count)
            - negative_prompt: str, negative prompt to exclude elements
            - format: str, 'wav' (default), 'mp3', or 'mp4'
            - inline: bool, inline mode
        :param sync: run synchronously (blocking) if True
        :return: True if started
        """
        extra = extra or {}
        ctx = context.ctx or CtxItem()
        model = context.model
        prompt = context.prompt or ""
        inline = bool(extra.get("inline", False))

        worker = MusicWorker()
        worker.window = self.window
        worker.client = self.window.core.api.google.get_client()
        worker.ctx = ctx

        # config
        worker.model = (model.id if model else "lyria-002")  # Lyria model id or resource
        worker.input_prompt = prompt
        worker.negative_prompt = extra.get("negative_prompt") or None

        # sample_count vs seed (mutually exclusive)
        worker.num = int(extra.get("num", 1))
        seed = extra.get("seed")
        worker.seed = int(seed) if seed not in (None, "") else None

        # preferred output format
        worker.out_format = str(extra.get("format") or self._default_format()).lower()

        # optional prompt improvement
        prompt_model = self.window.core.models.from_defaults()
        tmp = self.window.core.config.get('music.prompt_model') or self.window.core.config.get('video.prompt_model')
        if self.window.core.models.has(tmp):
            prompt_model = self.window.core.models.get(tmp)
        worker.model_prompt = prompt_model
        worker.system_prompt = self.window.core.prompt.get('music') or self.window.core.prompt.get('video')
        worker.raw = bool(self.window.core.config.get('img_raw'))

        worker.inline = inline

        self.worker = worker
        # Reuse video handlers for UX consistency (status/messages/download area)
        self.worker.signals.finished.connect(self.window.core.video.handle_finished)
        self.worker.signals.finished_inline.connect(self.window.core.video.handle_finished_inline)
        self.worker.signals.status.connect(self.window.core.video.handle_status)
        self.worker.signals.error.connect(self.window.core.video.handle_error)

        if sync or not self.window.controller.kernel.async_allowed(ctx):
            self.worker.run()
            return True

        # Use video busy state for unified UX
        self.window.dispatch(KernelEvent(KernelEvent.STATE_BUSY, {"id": "video"}))
        self.window.threadpool.start(self.worker)
        return True

    def _default_format(self) -> str:
        """
        Determine default preferred output format for saved files.
        """
        # try config override
        try:
            fmt = self.window.core.config.get('music.format')
            if isinstance(fmt, str) and fmt.strip():
                return fmt.strip()
        except Exception:
            pass
        # default to mp3 as most interoperable
        return "mp3"


class MusicSignals(QObject):
    finished = Signal(object, list, str)  # ctx, paths, prompt
    finished_inline = Signal(object, list, str)  # ctx, paths, prompt
    status = Signal(object)  # message
    error = Signal(object)  # exception


class MusicWorker(QRunnable):
    """
    Worker that calls the Vertex AI Lyria REST API (predict) to generate audio,
    then saves to disk. Transcodes to MP3/MP4 if ffmpeg is available and requested.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.signals = MusicSignals()
        self.window = None
        self.client: Optional[genai.Client] = None
        self.ctx: Optional[CtxItem] = None

        # inputs
        self.model: str = "lyria-002"
        self.input_prompt: str = ""
        self.negative_prompt: Optional[str] = None
        self.num: int = 1
        self.seed: Optional[int] = None
        self.out_format: str = "mp3"  # wav | mp3 | mp4

        # prompt improvement
        self.model_prompt = None
        self.system_prompt = ""
        self.raw = False

        # ui
        self.inline: bool = False

    @Slot()
    def run(self):
        try:
            # Validate Vertex configuration first
            if not self._using_vertex():
                raise RuntimeError(
                    "Vertex AI is required for music (Lyria). Enable Vertex in settings and configure credentials."
                )

            # Optional prompt enhancement via LLM
            if not self.raw and self.input_prompt:
                try:
                    self.signals.status.emit(trans('vid.status.prompt.wait'))
                    bridge_context = BridgeContext(
                        prompt=self.input_prompt,
                        system_prompt=self.system_prompt,
                        model=self.model_prompt,
                        max_tokens=200,
                        temperature=1.0,
                    )
                    ev = KernelEvent(KernelEvent.CALL, {'context': bridge_context, 'extra': {}})
                    self.window.dispatch(ev)
                    resp = ev.data.get('response')
                    if resp:
                        self.input_prompt = resp
                except Exception as e:
                    # non-fatal
                    self.signals.error.emit(e)
                    self.signals.status.emit(trans('vid.status.prompt.error') + ": " + str(e))

            # Build request
            project = os.getenv("GOOGLE_CLOUD_PROJECT", "")
            location = os.getenv("GOOGLE_CLOUD_LOCATION", "us-central1")
            model_id = self._normalize_model_id(self.model)

            url = f"https://{location}-aiplatform.googleapis.com/v1/projects/{project}/locations/{location}/publishers/google/models/{model_id}:predict"
            token = self._get_access_token()

            headers = {
                "Authorization": f"Bearer {token}",
                "Content-Type": "application/json",
            }

            # sample_count vs seed (mutually exclusive per Lyria API)
            instances: Dict[str, Any] = {
                "prompt": self.input_prompt,
            }
            if self.negative_prompt:
                instances["negative_prompt"] = self.negative_prompt

            params: Dict[str, Any] = {}
            if self.seed is not None and self.num > 1:
                # Keep API valid: if seed is set, do not set sample_count
                self.signals.status.emit("Seed provided; generating a single seeded sample (sample_count ignored).")
            if self.seed is not None:
                instances["seed"] = int(self.seed)
            elif self.num > 1:
                params["sample_count"] = int(self.num)

            body = {
                "instances": [instances],
                "parameters": params,
            }

            self.signals.status.emit(trans('vid.status.generating') + f": {self.input_prompt}...")

            # Call REST API
            resp = requests.post(url, headers=headers, data=json.dumps(body), timeout=120)
            if resp.status_code != 200:
                raise RuntimeError(f"Lyria API error: {resp.status_code} {resp.text}")

            data = resp.json() or {}
            preds = data.get("predictions") or []
            if not preds:
                raise RuntimeError("No audio predictions returned.")

            # Decode and save each audio clip
            paths: List[str] = []
            for idx, p in enumerate(preds):
                # API returns base64-encoded WAV in audioContent
                b64 = p.get("audioContent") or ""
                mime = p.get("mimeType") or "audio/wav"
                if not b64:
                    continue
                wav_bytes = base64.b64decode(b64)

                saved_path = self._save(idx, wav_bytes, mime)
                if saved_path:
                    paths.append(saved_path)

            if self.inline:
                self.signals.finished_inline.emit(self.ctx, paths, self.input_prompt)
            else:
                self.signals.finished.emit(self.ctx, paths, self.input_prompt)

        except Exception as e:
            self.signals.error.emit(e)
        finally:
            self._cleanup()

    # ---------- helpers ----------

    def _using_vertex(self) -> bool:
        """Check if Vertex AI is active via env variable set by ApiGoogle.setup_env()."""
        val = os.getenv("GOOGLE_GENAI_USE_VERTEXAI") or ""
        return str(val).lower() in ("1", "true", "yes", "y")

    def _normalize_model_id(self, model_id: str) -> str:
        """
        Normalize model id to the publisher form 'lyria-002'.
        Accepts inputs like 'lyria-002', 'models/lyria-002', or full resource names.
        """
        if not model_id:
            return "lyria-002"
        # get trailing token after '/'
        tail = model_id.split("/")[-1]
        if tail.startswith("lyria"):
            return tail
        # fallback
        return "lyria-002"

    def _get_access_token(self) -> str:
        """
        Obtain OAuth2 access token for Vertex AI (cloud-platform scope) using ADC.
        """
        try:
            import google.auth
            from google.auth.transport.requests import Request as GRequest
            creds, _ = google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
            creds.refresh(GRequest())
            return creds.token
        except Exception as e:
            # As a fallback, allow passing a pre-fetched token via env var.
            token = os.getenv("VERTEX_AI_TOKEN") or os.getenv("GCP_ACCESS_TOKEN") or ""
            if token:
                return token
            raise e

    def _save(self, idx: int, wav_bytes: bytes, mime: str) -> Optional[str]:
        """
        Save audio to disk. Primary save as WAV; then optionally transcode to MP3/MP4 if requested.
        """
        # Build base filename
        safe = self.window.core.video.make_safe_filename(self.input_prompt)
        base_name = (
            datetime.date.today().strftime("%Y-%m-%d") + "_" +
            datetime.datetime.now().strftime("%H-%M-%S") + "-" +
            safe + "-" + str(idx + 1)
        )

        # Directories: prefer 'audio' user dir if available, else fallback to 'video'
        try:
            out_dir = self.window.core.config.get_user_dir("audio")
        except Exception:
            out_dir = self.window.core.config.get_user_dir("video")

        os.makedirs(out_dir, exist_ok=True)

        # Always persist a WAV first (what API returns)
        wav_path = os.path.join(out_dir, base_name + ".wav")
        self.signals.status.emit(trans('vid.status.downloading') + f" ({idx + 1} / {max(1, self.num)}) -> {wav_path}")
        with open(wav_path, "wb") as f:
            f.write(wav_bytes)

        # Transcode if needed
        fmt = (self.out_format or "mp3").lower()
        if fmt == "wav":
            return wav_path

        ffmpeg = shutil.which("ffmpeg") or shutil.which("ffmpeg.exe")
        if not ffmpeg:
            # No ffmpeg -> keep WAV
            self.signals.status.emit("ffmpeg not found. Saved WAV output only.")
            return wav_path

        if fmt == "mp3":
            out_path = os.path.join(out_dir, base_name + ".mp3")
            cmd = [ffmpeg, "-y", "-i", wav_path, "-vn", "-codec:a", "libmp3lame", "-b:a", "192k", out_path]
        elif fmt == "mp4":
            out_path = os.path.join(out_dir, base_name + ".mp4")
            # audio-only MP4 (AAC)
            cmd = [ffmpeg, "-y", "-i", wav_path, "-vn", "-c:a", "aac", "-b:a", "192k", out_path]
        else:
            # Unknown format -> keep WAV
            return wav_path

        try:
            subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
            return out_path
        except Exception:
            # If transcoding fails, keep WAV
            return wav_path

    def _cleanup(self):
        """Cleanup signals object."""
        sig = self.signals
        self.signals = None
        if sig is not None:
            try:
                sig.deleteLater()
            except RuntimeError:
                pass
pygpt_net/ui/widget/dialog/confirm.py
@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.
+# Updated Date: 2025.09.27 09:30:00 #
 # ================================================== #
 
 import sys
-from PySide6.QtCore import Qt
+from PySide6.QtCore import Qt, QTimer
 from PySide6.QtWidgets import QDialog, QLabel, QHBoxLayout, QVBoxLayout, QPushButton
 
 from pygpt_net.utils import trans
@@ -44,11 +44,7 @@ class ConfirmDialog(QDialog):
 
         # Always make the neutral action (No/Cancel) the default/active one.
         # This ensures Enter triggers the safe option by default.
-        self.
-        self.window.ui.nodes['dialog.confirm.btn.no'].setDefault(True)
-        self.window.ui.nodes['dialog.confirm.btn.no'].setFocus()
-        self.window.ui.nodes['dialog.confirm.btn.yes'].setAutoDefault(False)
-        self.window.ui.nodes['dialog.confirm.btn.yes'].setDefault(False)
+        self._apply_neutral_default()
 
         # Bottom button row with platform-specific ordering
         # Windows: affirmative on the left, neutral on the right
@@ -85,4 +81,34 @@ class ConfirmDialog(QDialog):
         :param event: close event
         """
         self.window.controller.dialogs.confirm.dismiss(self.type, self.id)
-        super(ConfirmDialog, self).closeEvent(event)
+        super(ConfirmDialog, self).closeEvent(event)
+
+    def showEvent(self, event):
+        """
+        Ensure neutral button is default/active on every show.
+
+        Using a single-shot timer defers focus/default restoration until
+        after the dialog becomes visible, which prevents focus being stolen
+        by the window manager.
+        """
+        super(ConfirmDialog, self).showEvent(event)
+        QTimer.singleShot(0, self._apply_neutral_default)
+
+    def _apply_neutral_default(self):
+        """
+        Set the neutral action (No/Cancel) as default and active.
+        Always called on construction and each time the dialog is shown.
+        """
+        btn_no = self.window.ui.nodes.get('dialog.confirm.btn.no')
+        btn_yes = self.window.ui.nodes.get('dialog.confirm.btn.yes')
+        if not btn_no or not btn_yes:
+            return
+
+        # Make sure affirmative button cannot become default by leftover state
+        btn_yes.setAutoDefault(False)
+        btn_yes.setDefault(False)
+
+        # Make neutral (No/Cancel) the active default and take focus
+        btn_no.setAutoDefault(True)
+        btn_no.setDefault(True)
+        btn_no.setFocus()
pygpt_net/ui/widget/option/combo.py
@@ -11,26 +11,118 @@
 
 from PySide6.QtCore import Qt
 from PySide6.QtWidgets import QHBoxLayout, QWidget, QComboBox
-from PySide6.QtGui import QFontMetrics
+from PySide6.QtGui import QFontMetrics, QStandardItem, QStandardItemModel  # keep existing imports, extend with items
 
 from pygpt_net.utils import trans
 
 class SeparatorComboBox(QComboBox):
-    """A combo box that supports adding separator items."""
+    """A combo box that supports adding separator items and prevents selecting them."""
+
+    def __init__(self, parent=None):
+        super().__init__(parent)
+        # Custom role used to mark separator rows without interfering with existing UserRole data
+        self._SEP_ROLE = Qt.UserRole + 1000
+        # Internal guard to avoid recursive index changes
+        self._block_guard = False
 
     def addSeparator(self, text):
         """
-        Adds a separator item to the combo box.
+        Adds a separator item to the combo box that cannot be selected.
+        This keeps separators visible but disabled/unselectable.
 
         :param text: The text to display for the separator.
         """
-
-
+        model = self.model()
+        if isinstance(model, QStandardItemModel):
+            item = QStandardItem(text)
+            # Disable and make the row unselectable
+            item.setFlags(item.flags() & ~Qt.ItemIsEnabled & ~Qt.ItemIsSelectable)
+            # Mark explicitly as separator using custom role
+            item.setData(True, self._SEP_ROLE)
+            model.appendRow(item)
+        else:
+            # Fallback: keep previous behavior and additionally tag item with custom role
+            index = self.count()
+            self.addItem(text)
+            try:
+                role = Qt.UserRole - 1
+                self.setItemData(index, 0, role)  # legacy approach used sometimes to indicate non-enabled
+            except Exception:
+                pass
+            # Tag as separator via custom role for later checks
+            self.setItemData(index, True, self._SEP_ROLE)
+
+    def is_separator(self, index: int) -> bool:
+        """Returns True if item at index is a separator."""
+        if index < 0 or index >= self.count():
+            return False
         try:
-
-
-        except:
+            if self.itemData(index, self._SEP_ROLE):
+                return True
+        except Exception:
             pass
+        # Fallback: check flags (works with item models)
+        try:
+            idx = self.model().index(index, self.modelColumn(), self.rootModelIndex())
+            flags = self.model().flags(idx)
+            if not (flags & Qt.ItemIsEnabled) or not (flags & Qt.ItemIsSelectable):
+                return True
+        except Exception:
+            pass
+        return False
+
+    def first_valid_index(self) -> int:
+        """Returns the first non-separator index, or -1 if none."""
+        for i in range(self.count()):
+            if not self.is_separator(i):
+                return i
+        return -1
+
+    def _sanitize_index(self, index: int) -> int:
+        """Returns a corrected non-separator index, or -1 if none available."""
+        if index is None:
+            index = -1
+        if index < 0 or index >= self.count():
+            return self.first_valid_index()
+        if self.is_separator(index):
+            # Prefer the next valid item; if none, scan backwards; else -1
+            for i in range(index + 1, self.count()):
+                if not self.is_separator(i):
+                    return i
+            for i in range(index - 1, -1, -1):
+                if not self.is_separator(i):
+                    return i
+            return -1
+        return index
+
+    def ensure_valid_current(self) -> int:
+        """
+        Ensures the current index is not a separator.
+        Returns the final valid index (or -1) after correction.
+        """
+        current = super().currentIndex()
+        corrected = self._sanitize_index(current)
+        if corrected != current:
+            try:
+                self._block_guard = True
+                super().setCurrentIndex(corrected if corrected != -1 else -1)
+            finally:
+                self._block_guard = False
+        return corrected
+
+    def setCurrentIndex(self, index: int) -> None:
+        """
+        Prevent setting the current index to a separator from any caller.
+        """
+        if self._block_guard:
+            # When guarded, pass through without checks to avoid recursion
+            return super().setCurrentIndex(index)
+        corrected = self._sanitize_index(index)
+        try:
+            self._block_guard = True
+            super().setCurrentIndex(corrected if corrected != -1 else -1)
+        finally:
+            self._block_guard = False
 
 
 class NoScrollCombo(SeparatorComboBox):
@@ -116,7 +208,11 @@ class OptionCombo(QWidget):
                         else:
                             self.combo.addItem(value, key)
                 else:
-
+                    # Support simple string keys including "separator::" entries
+                    if isinstance(item, str) and item.startswith("separator::"):
+                        self.combo.addSeparator(item.split("separator::", 1)[1])
+                    else:
+                        self.combo.addItem(item, item)
        elif type(self.keys) is dict:
            for key, value in self.keys.items():
                if not isinstance(key, str):
@@ -126,6 +222,32 @@ class OptionCombo(QWidget):
                else:
                    self.combo.addItem(value, key)
 
+        # Ensure a valid non-separator selection after population
+        self._apply_initial_selection()
+
+    def _apply_initial_selection(self):
+        """
+        Ensures that after building the list the combobox does not end up on a separator.
+        Prefers self.current_id if present; otherwise selects the first valid non-separator.
+        Signals are suppressed during this operation.
+        """
+        # lock on_change during initial selection
+        prev_locked = self.locked
+        self.locked = True
+        try:
+            index = -1
+            if self.current_id is not None and self.current_id != "":
+                index = self.combo.findData(self.current_id)
+            if index == -1:
+                index = self.combo.first_valid_index()
+            if index != -1:
+                self.combo.setCurrentIndex(index)
+            else:
+                # No valid items, clear selection
+                self.combo.setCurrentIndex(-1)
+        finally:
+            self.locked = prev_locked
+
     def set_value(self, value):
         """
         Set value
@@ -137,6 +259,9 @@ class OptionCombo(QWidget):
        index = self.combo.findData(value)
        if index != -1:
            self.combo.setCurrentIndex(index)
+        else:
+            # If requested value is not present, keep current selection but make sure it is valid.
+            self.combo.ensure_valid_current()
 
     def get_value(self):
         """
@@ -159,6 +284,8 @@ class OptionCombo(QWidget):
        self.option["keys"] = keys
        self.combo.clear()
        self.update()
+        # After rebuilding, guarantee a non-separator selection
+        self.combo.ensure_valid_current()
        if lock:
            self.locked = False
 
@@ -171,10 +298,21 @@
        """
        if self.locked:
            return
+
+        # If somehow a separator got focus, correct it immediately and do not propagate invalid IDs
+        if self.combo.is_separator(index):
+            self.locked = True
+            corrected = self.combo.ensure_valid_current()
+            self.locked = False
+            if corrected == -1:
+                # Nothing valid to select
+                self.current_id = None
+                return
+            index = corrected
+
        self.current_id = self.combo.itemData(index)
        self.window.controller.config.combo.on_update(self.parent_id, self.id, self.option, self.current_id)
 
     def fit_to_content(self):
         """Fit to content"""
-        self.combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)
-
+        self.combo.setSizeAdjustPolicy(QComboBox.AdjustToContents)