pygpt-net 2.7.2__py3-none-any.whl → 2.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,13 +6,13 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.11 14:00:00 #
+ # Updated Date: 2025.12.30 22:00:00 #
  # ================================================== #
 
  import base64
  import datetime
  import os
- from typing import Optional, Dict, Any
+ from typing import Optional, Dict, Any, List
 
  import requests
 
@@ -51,12 +51,14 @@ class Image:
  :param extra: Extra arguments
  :param sync: Synchronous mode
  """
+ extra = extra or {}
  prompt = context.prompt
  ctx = context.ctx
  model = context.model
  num = extra.get("num", 1)
  inline = extra.get("inline", False)
  sub_mode = self.MODE_GENERATE
+ image_id = extra.get("image_id")  # previous image reference for remix
 
  # if attachments then switch mode to EDIT
  attachments = context.attachments
@@ -85,6 +87,7 @@ class Image:
  self.worker.system_prompt = self.window.core.prompt.get('img')
  self.worker.num = num
  self.worker.inline = inline
+ self.worker.image_id = image_id  # remix: previous image path/identifier
 
  # config
  if self.window.core.config.has('img_quality'):
@@ -131,22 +134,24 @@ class ImageWorker(QRunnable):
  self.kwargs = kwargs
  self.window = None
  self.client = None
- self.ctx = None
+ self.ctx: Optional[CtxItem] = None
  self.raw = False
  self.mode = Image.MODE_GENERATE  # default mode is generate
  self.model = "dall-e-3"
  self.quality = "standard"
  self.resolution = "1792x1024"
- self.attachments = {}  # attachments for edit mode
+ self.attachments: Dict[str, Any] = {}  # attachments for edit mode
  self.model_prompt = None
- self.input_prompt = None
+ self.input_prompt: Optional[str] = None
  self.system_prompt = None
  self.inline = False
  self.num = 1
+ self.image_id: Optional[str] = None  # previous image reference for remix
+
+ # legacy maps kept for backwards compatibility (dall-e-2 / dall-e-3 exact ids)
  self.allowed_max_num = {
      "dall-e-2": 4,
      "dall-e-3": 1,
-     "gpt-image-1": 1,
  }
  self.allowed_resolutions = {
      "dall-e-2": [
@@ -159,29 +164,60 @@ class ImageWorker(QRunnable):
          "1024x1792",
          "1024x1024",
      ],
-     "gpt-image-1": [
-         "1536x1024",
-         "1024x1536",
-         "1024x1024",
-         "auto",
-     ],
  }
  self.allowed_quality = {
-     "dall-e-2": [
-         "standard",
-     ],
-     "dall-e-3": [
-         "standard",
-         "hd",
-     ],
-     "gpt-image-1": [
-         "auto",
-         "high",
-         "medium",
-         "low",
-     ],
+     "dall-e-2": ["standard"],
+     "dall-e-3": ["standard", "hd"],
  }
 
+ # ---------- model helpers ----------
+
+ def _is_gpt_image_model(self, model_id: Optional[str] = None) -> bool:
+     mid = (model_id or self.model or "").lower()
+     return mid.startswith("gpt-image-1")
+
+ def _is_dalle2(self, model_id: Optional[str] = None) -> bool:
+     mid = (model_id or self.model or "").lower()
+     return mid == "dall-e-2"
+
+ def _is_dalle3(self, model_id: Optional[str] = None) -> bool:
+     mid = (model_id or self.model or "").lower()
+     return mid == "dall-e-3"
+
+ def _max_num_for_model(self) -> int:
+     if self._is_gpt_image_model():
+         return 1
+     if self._is_dalle2():
+         return self.allowed_max_num["dall-e-2"]
+     if self._is_dalle3():
+         return self.allowed_max_num["dall-e-3"]
+     return 1
+
+ def _normalize_resolution_for_model(self, resolution: Optional[str]) -> str:
+     res = (resolution or "").strip() or "1024x1024"
+     if self._is_gpt_image_model():
+         allowed = {"1024x1024", "1536x1024", "1024x1536", "auto"}
+         return res if res in allowed else "auto"
+     if self._is_dalle2():
+         allowed = set(self.allowed_resolutions["dall-e-2"])
+         return res if res in allowed else "1024x1024"
+     if self._is_dalle3():
+         allowed = set(self.allowed_resolutions["dall-e-3"])
+         return res if res in allowed else "1024x1024"
+     return res
+
+ def _normalize_quality_for_model(self, quality: Optional[str]) -> Optional[str]:
+     q = (quality or "").strip().lower()
+     if self._is_gpt_image_model():
+         allowed = {"auto", "high", "medium", "low"}
+         return q if q in allowed else "auto"
+     if self._is_dalle2():
+         return "standard"
+     if self._is_dalle3():
+         allowed = {"standard", "hd"}
+         return q if q in allowed else "standard"
+     return None
+
  @Slot()
  def run(self):
      """Run worker"""
@@ -211,53 +247,93 @@ class ImageWorker(QRunnable):
 
  self.signals.status.emit(trans('img.status.generating') + ": {}...".format(self.input_prompt))
 
- paths = []  # downloaded images paths
+ paths: List[str] = []  # downloaded images paths
  try:
-     # check if number of images is supported
-     if self.model in self.allowed_max_num:
-         if self.num > self.allowed_max_num[self.model]:
-             self.num = self.allowed_max_num[self.model]
-
-     # check if resolution is supported
-     resolution = self.resolution
-     if self.model in self.allowed_resolutions:
-         if resolution not in self.allowed_resolutions[self.model]:
-             resolution = self.allowed_resolutions[self.model][0]
-
-     quality = self.quality
-     if self.model in self.allowed_quality:
-         if quality not in self.allowed_quality[self.model]:
-             quality = self.allowed_quality[self.model][0]
-
-     # send to API
+     # enforce model-specific limits/options
+     self.num = min(max(1, int(self.num or 1)), self._max_num_for_model())
+     resolution = self._normalize_resolution_for_model(self.resolution)
+     quality = self._normalize_quality_for_model(self.quality)
+
      response = None
-     if self.mode == Image.MODE_GENERATE:
-         if self.model == "dall-e-2":
-             response = self.client.images.generate(
-                 model=self.model,
-                 prompt=self.input_prompt,
-                 n=self.num,
-                 size=resolution,
-             )
-         elif self.model == "dall-e-3" or self.model == "gpt-image-1":
-             response = self.client.images.generate(
-                 model=self.model,
-                 prompt=self.input_prompt,
-                 n=self.num,
-                 quality=quality,
-                 size=resolution,
-             )
-     elif self.mode == Image.MODE_EDIT:
-         images = []
-         for uuid in self.attachments:
-             attachment = self.attachments[uuid]
-             if attachment.path and os.path.exists(attachment.path):
-                 images.append(open(attachment.path, "rb"))
-         response = self.client.images.edit(
-             model=self.model,
-             image=images,
-             prompt=self.input_prompt,
-         )
+
+     # Remix path: if image_id provided, prefer editing with previous image reference
+     if self.image_id:
+         if self._is_dalle3():
+             try:
+                 self.signals.status.emit("Remix is not supported for this model; generating a new image.")
+             except Exception:
+                 pass
+         else:
+             remix_images = []
+             try:
+                 if isinstance(self.image_id, str) and os.path.exists(self.image_id):
+                     remix_images.append(open(self.image_id, "rb"))
+             except Exception:
+                 remix_images = []
+
+             if len(remix_images) > 0:
+                 try:
+                     edit_kwargs = {
+                         "model": self.model,
+                         "image": remix_images,
+                         "prompt": self.input_prompt,
+                         "n": self.num,
+                         "size": resolution,
+                     }
+                     if self._is_gpt_image_model() or self._is_dalle3():
+                         if quality:
+                             edit_kwargs["quality"] = quality
+                     response = self.client.images.edit(**edit_kwargs)
+                 finally:
+                     for f in remix_images:
+                         try:
+                             f.close()
+                         except Exception:
+                             pass
+
+     # Normal API paths when remix not executed or unsupported
+     if response is None:
+         if self.mode == Image.MODE_GENERATE:
+             if self._is_dalle2():
+                 response = self.client.images.generate(
+                     model=self.model,
+                     prompt=self.input_prompt,
+                     n=self.num,
+                     size=resolution,
+                 )
+             else:
+                 gen_kwargs = {
+                     "model": self.model,
+                     "prompt": self.input_prompt,
+                     "n": self.num,
+                     "size": resolution,
+                 }
+                 if (self._is_gpt_image_model() or self._is_dalle3()) and quality:
+                     gen_kwargs["quality"] = quality
+                 response = self.client.images.generate(**gen_kwargs)
+         elif self.mode == Image.MODE_EDIT:
+             images = []
+             for uuid in self.attachments or {}:
+                 attachment = self.attachments[uuid]
+                 if attachment.path and os.path.exists(attachment.path):
+                     images.append(open(attachment.path, "rb"))
+             try:
+                 edit_kwargs = {
+                     "model": self.model,
+                     "image": images,
+                     "prompt": self.input_prompt,
+                     "n": self.num,
+                     "size": resolution,
+                 }
+                 if (self._is_gpt_image_model() or self._is_dalle3()) and quality:
+                     edit_kwargs["quality"] = quality
+                 response = self.client.images.edit(**edit_kwargs)
+             finally:
+                 for f in images:
+                     try:
+                         f.close()
+                     except Exception:
+                         pass
 
      # check response
      if response is None:
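
The generation path now checks for a remix first: when extra["image_id"] points at an existing file, the worker calls images.edit() with that file and only falls back to the regular generate/edit branches if response is still None (dall-e-3, for which remix is unsupported, skips straight to generation). A hypothetical caller-side sketch (the generate() entry point and its context argument are assumptions inferred from the :param docstring earlier in this diff, not shown here):

    extra = {
        "num": 1,
        "inline": True,
        "image_id": "/path/to/previous.png",  # file saved from an earlier turn
    }
    image_handler.generate(context=bridge_context, extra=extra)  # assumed wiring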
@@ -278,14 +354,13 @@
  msg = trans('img.status.downloading') + " (" + str(i + 1) + " / " + str(self.num) + ") -> " + str(path)
  self.signals.status.emit(msg)
 
- if response.data[i] is None:
-     self.signals.error.emit("API Error: empty image data")
-     return
- if response.data[i].url:  # dall-e 2 and 3 returns URL
-     res = requests.get(response.data[i].url)
+ item = response.data[i]
+ data = None
+ if getattr(item, "url", None):
+     res = requests.get(item.url)
      data = res.content
- else:  # gpt-image-1 returns base64 encoded image
-     data = base64.b64decode(response.data[i].b64_json)
+ elif getattr(item, "b64_json", None):
+     data = base64.b64decode(item.b64_json)
 
  # save image
  if data and self.window.core.image.save_image(path, data):
@@ -293,6 +368,16 @@
  else:
      self.signals.error.emit("Error saving image")
 
+ # store image_id for future remix (use first saved path as reference)
+ if paths:
+     try:
+         if not isinstance(self.ctx.extra, dict):
+             self.ctx.extra = {}
+         self.ctx.extra["image_id"] = paths[0]
+         self.window.core.ctx.update_item(self.ctx)
+     except Exception:
+         pass
+
  # send finished signal
  if self.inline:
      self.signals.finished_inline.emit(  # separated signal for inline mode
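
This closes the remix loop: the first saved path is persisted as ctx.extra["image_id"], so a later turn can hand it back through extra["image_id"] and turn the next generation into a remix. A minimal read-back sketch (assumes access to the previous CtxItem):

    prev = ctx.extra.get("image_id") if isinstance(ctx.extra, dict) else None
    if prev and os.path.exists(prev):
        extra["image_id"] = prev  # next generation remixes this image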
@@ -322,4 +407,4 @@
  try:
      sig.deleteLater()
  except RuntimeError:
-     pass
+     pass
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.12.26 12:00:00 #
+ # Updated Date: 2025.12.30 22:00:00 #
  # ================================================== #
 
  import datetime
@@ -59,6 +59,7 @@ class Video:
  prompt = context.prompt
  num = int(extra.get("num", 1))
  inline = bool(extra.get("inline", False))
+ video_id = extra.get("video_id")
 
  # decide sub-mode based on attachments (image-to-video when image is attached)
  sub_mode = self.MODE_GENERATE
@@ -85,6 +86,7 @@ class Video:
  worker.raw = self.window.core.config.get('img_raw')
  worker.num = num
  worker.inline = inline
+ worker.video_id = video_id
 
  # optional params (app-level options)
  worker.aspect_ratio = str(extra.get("aspect_ratio") or self.window.core.config.get('video.aspect_ratio') or "16:9")
@@ -155,6 +157,7 @@ class VideoWorker(QRunnable):
  self.input_prompt = ""
  self.system_prompt = ""
  self.inline = False
+ self.video_id = None
  self.raw = False
  self.num = 1
 
@@ -169,6 +172,7 @@ class VideoWorker(QRunnable):
  @Slot()
  def run(self):
      try:
+         kernel = self.window.controller.kernel
          # Optional prompt enhancement via app default LLM
          if not self.raw and not self.inline and self.input_prompt:
              try:
@@ -199,42 +203,88 @@
  # Image-to-video: first image attachment as input_reference, must match "size"
  image_path = self._first_image_attachment(self.attachments) if self.mode == Video.MODE_IMAGE_TO_VIDEO else None
 
- self.signals.status.emit(trans('vid.status.generating') + f": {self.input_prompt}...")
+ # If remix requested, ignore any image input_reference
+ is_remix = bool(self.video_id)
+ if is_remix:
+     image_path = None  # enforce remix over image-to-video
+
+ label = trans('vid.status.generating')
+ if is_remix:
+     label += " (remix)"
+ self.signals.status.emit(label + f": {self.input_prompt}...")
 
  # Create job
- create_kwargs: Dict[str, Any] = {
-     "model": self.model or "sora-2",
-     "prompt": self.input_prompt or "",
-     "seconds": str(seconds),
-     "size": size,
- }
-
- # Attach image as input_reference; auto-resize to match requested size if needed
+ job = None
  file_handle = None
- if image_path:
-     prepared = self._prepare_input_reference(image_path, size)
-     if prepared is not None:
-         # tuple (filename, bytes, mime) supported by OpenAI Python SDK
-         create_kwargs["input_reference"] = prepared
-     else:
-         # Fallback: use original file (may fail if size/mime mismatch)
+
+ if is_remix:
+     # Primary path: dedicated remix endpoint, inherits the original's length and size
+     last_exc = None
+     try:
+         job = self.client.videos.remix(
+             video_id=str(self.video_id),
+             prompt=self.input_prompt or "",
+         )
+     except Exception as e1:
+         # Fallbacks for older SDKs: pass remix id via create()
+         last_exc = e1
          try:
-             file_handle = open(image_path, "rb")
-             create_kwargs["input_reference"] = file_handle
-         except Exception as e:
-             self.signals.error.emit(e)
+             job = self.client.videos.create(
+                 model=self.model or "sora-2",
+                 prompt=self.input_prompt or "",
+                 remix_id=str(self.video_id),
+             )
+         except Exception as e2:
+             last_exc = e2
+             try:
+                 job = self.client.videos.create(
+                     model=self.model or "sora-2",
+                     prompt=self.input_prompt or "",
+                     remix_video_id=str(self.video_id),
+                 )
+             except Exception as e3:
+                 last_exc = e3
+     if job is None:
+         raise last_exc or RuntimeError("Unable to start remix job.")
+ else:
+     create_kwargs: Dict[str, Any] = {
+         "model": self.model or "sora-2",
+         "prompt": self.input_prompt or "",
+         "seconds": str(seconds),
+         "size": size,
+     }
+
+     # Attach image as input_reference; auto-resize to match requested size if needed
+     if image_path:
+         prepared = self._prepare_input_reference(image_path, size)
+         if prepared is not None:
+             create_kwargs["input_reference"] = prepared
+         else:
+             try:
+                 file_handle = open(image_path, "rb")
+                 create_kwargs["input_reference"] = file_handle
+             except Exception as e:
+                 self.signals.error.emit(e)
+
+     job = self.client.videos.create(**create_kwargs)
 
- job = self.client.videos.create(**create_kwargs)
  video_id = self._safe_get(job, "id")
  if not video_id:
-     # include raw payload for debugging
      raise RuntimeError("Video job ID missing in create response.")
 
  # Poll until completed (or failed/canceled)
+ if not isinstance(self.ctx.extra, dict):
+     self.ctx.extra = {}
+ self.ctx.extra['video_id'] = video_id  # store video_id in ctx extra
+ self.window.core.ctx.update_item(self.ctx)
  last_progress = None
  last_status = None
  while True:
+     if kernel.stopped():
+         break
      time.sleep(5)
+     if kernel.stopped():
+         break
      job = self.client.videos.retrieve(video_id)
      status = self._safe_get(job, "status") or ""
      progress = self._safe_get(job, "progress")
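
The remix branch above tries the dedicated videos.remix() endpoint first, then retries videos.create() with two candidate keyword names (remix_id, then remix_video_id) for older SDKs, keeping the last exception so a total failure still surfaces. The same "first call that succeeds wins" pattern, factored into a generic helper (illustrative sketch only, not part of the package):

    def first_success(attempts):
        # run callables in order; return the first result, re-raise the last error
        last_exc = None
        for attempt in attempts:
            try:
                return attempt()
            except Exception as e:
                last_exc = e
        raise last_exc or RuntimeError("all attempts failed")

    job = first_success([
        lambda: client.videos.remix(video_id=vid, prompt=prompt),
        lambda: client.videos.create(model=model, prompt=prompt, remix_id=vid),
        lambda: client.videos.create(model=model, prompt=prompt, remix_video_id=vid),
    ])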
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.12.27 02:00:00 #
+ # Updated Date: 2025.12.30 22:00:00 #
  # ================================================== #
 
  import copy
@@ -207,6 +207,15 @@ class Patch:
  patch_css('style.dark.css', True)
  updated = True
 
+ # < 2.7.3
+ if old < parse_version("2.7.3"):
+     print("Migrating config from < 2.7.3...")
+     if "video.remix" not in data:
+         data["video.remix"] = False
+     if "img.remix" not in data:
+         data["img.remix"] = False
+     updated = True
+
  # update file
  migrated = False
  if updated:
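
The migration only adds keys that are missing, so values the user has already set survive the upgrade. The same idempotent-defaults step in a more compact form (equivalent sketch):

    for key in ("video.remix", "img.remix"):
        data.setdefault(key, False)  # no-op when the key already exists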
@@ -6,13 +6,15 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.12.28 18:00:00 #
+ # Updated Date: 2025.12.30 22:00:00 #
  # ================================================== #
 
- from PySide6.QtWidgets import QVBoxLayout, QWidget
+ from PySide6.QtWidgets import QVBoxLayout, QWidget, QCheckBox, QHBoxLayout
 
  from pygpt_net.ui.widget.option.combo import OptionCombo
+ from pygpt_net.ui.widget.option.input import OptionInput
  from pygpt_net.ui.widget.option.slider import OptionSlider
+ from pygpt_net.utils import trans
 
 
  class Image:
@@ -33,33 +35,41 @@ class Image:
  """
  option = {
      "type": "int",
-     "slider": True,
      "label": "img_variants",
      "min": 1,
      "max": 4,
-     "step": 1,
      "value": 1,
-     "multiplier": 1,
  }
 
  ui = self.window.ui
  conf_global = ui.config['global']
 
- container = QWidget()
+ container = QWidget(parent=self.window)
  ui.nodes['dalle.options'] = container
 
- conf_global['img_variants'] = OptionSlider(self.window, 'global', 'img_variants', option)
+ conf_global['img_variants'] = OptionInput(self.window, 'global', 'img_variants', option)
+ conf_global['img_variants'].setToolTip(trans("toolbox.img_variants.label"))
 
  option_resolutions = self.window.core.image.get_resolution_option()
  conf_global['img_resolution'] = OptionCombo(self.window, 'global', 'img_resolution', option_resolutions)
  conf_global['img_resolution'].setMinimumWidth(160)
 
+ conf_global['img.remix'] = QCheckBox(trans("img.remix"), parent=container)
+ conf_global['img.remix'].setToolTip(trans("img.remix.tooltip"))
+ conf_global['img.remix'].toggled.connect(self.window.controller.media.toggle_remix_image)
+
+ cols = QHBoxLayout()
+ cols.addWidget(conf_global['img_resolution'], 3)
+ cols.addWidget(conf_global['img_variants'], 1)
+ cols.setContentsMargins(2, 5, 5, 5)
+
  rows = QVBoxLayout()
- rows.addWidget(conf_global['img_variants'])
- rows.addWidget(conf_global['img_resolution'])
+ rows.addLayout(cols)
+ rows.addWidget(conf_global['img.remix'])
  rows.setContentsMargins(2, 5, 5, 5)
 
  container.setLayout(rows)
- container.setContentsMargins(2, 0, 0, 0)
+ container.setContentsMargins(2, 0, 0, 10)
+ container.setFixedHeight(100)
 
  return container
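
The new checkbox connects its toggled signal to window.controller.media.toggle_remix_image, a controller slot that is not part of this diff. Given the img.remix key added by the 2.7.3 config migration above, a plausible shape for that slot could look like this (hypothetical sketch; the real handler may do more):

    def toggle_remix_image(self, checked: bool):
        # persist the toolbox checkbox state under the new config key
        self.window.core.config.set('img.remix', checked)
        self.window.core.config.save()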
@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.12.25 20:00:00 #
+ # Updated Date: 2025.12.30 22:00:00 #
  # ================================================== #
 
  from PySide6.QtWidgets import QVBoxLayout, QHBoxLayout, QWidget, QCheckBox
@@ -34,7 +34,7 @@ class Raw:
  ui = self.window.ui
  conf_global = ui.config['global']
 
- container = QWidget()
+ container = QWidget(parent=self.window)
  ui.nodes['media.raw'] = container
 
  conf_global['img_raw'] = QCheckBox(trans("img.raw"), parent=container)
@@ -6,13 +6,14 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.12.28 18:00:00 #
+ # Updated Date: 2025.12.30 22:00:00 #
  # ================================================== #
 
- from PySide6.QtWidgets import QWidget, QHBoxLayout
+ from PySide6.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QCheckBox
 
  from pygpt_net.ui.widget.option.combo import OptionCombo
  from pygpt_net.ui.widget.option.input import OptionInput
+ from pygpt_net.utils import trans
 
 
  class Video:
@@ -34,7 +35,7 @@ class Video:
  ui = self.window.ui
  conf_global = ui.config['global']
 
- container = QWidget()
+ container = QWidget(parent=self.window)
  ui.nodes['video.options'] = container
 
  option_ratio = self.window.core.video.get_aspect_ratio_option()
@@ -44,18 +45,29 @@ class Video:
  conf_global['video.aspect_ratio'] = OptionCombo(self.window, 'global', 'video.aspect_ratio', option_ratio)
  conf_global['video.resolution'] = OptionCombo(self.window, 'global', 'video.resolution', option_resolution)
  conf_global['video.duration'] = OptionInput(self.window, 'global', 'video.duration', option_duration)
+ conf_global['video.duration'].setToolTip(trans('settings.video.duration.desc'))
 
  conf_global['video.aspect_ratio'].setMinimumWidth(120)
  conf_global['video.resolution'].setMinimumWidth(120)
  conf_global['video.duration'].setMinimumWidth(50)
 
- rows = QHBoxLayout()
- rows.addWidget(conf_global['video.resolution'], 2)
- rows.addWidget(conf_global['video.aspect_ratio'], 2)
- rows.addWidget(conf_global['video.duration'], 1)
- rows.setContentsMargins(2, 5, 5, 5)
+ conf_global['video.remix'] = QCheckBox(trans("video.remix"), parent=container)
+ conf_global['video.remix'].setToolTip(trans("video.remix.tooltip"))
+ conf_global['video.remix'].toggled.connect(self.window.controller.media.toggle_remix_video)
+
+ cols = QHBoxLayout()
+ cols.addWidget(conf_global['video.resolution'], 2)
+ cols.addWidget(conf_global['video.aspect_ratio'], 2)
+ cols.addWidget(conf_global['video.duration'], 1)
+ cols.setContentsMargins(2, 5, 5, 5)
+
+ rows = QVBoxLayout()
+ rows.addLayout(cols)
+ rows.addWidget(conf_global['video.remix'])
+ rows.setContentsMargins(0, 0, 0, 0)
 
  container.setLayout(rows)
- container.setContentsMargins(2, 0, 0, 0)
+ container.setContentsMargins(2, 0, 0, 10)
+ container.setFixedHeight(90)
 
  return container