pygpt-net 2.6.33-py3-none-any.whl → 2.6.35-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/assistant/batch.py +14 -4
  4. pygpt_net/controller/assistant/files.py +1 -0
  5. pygpt_net/controller/assistant/store.py +195 -1
  6. pygpt_net/controller/camera/camera.py +1 -1
  7. pygpt_net/controller/chat/common.py +58 -48
  8. pygpt_net/controller/chat/handler/stream_worker.py +55 -43
  9. pygpt_net/controller/config/placeholder.py +95 -75
  10. pygpt_net/controller/dialogs/confirm.py +3 -1
  11. pygpt_net/controller/media/media.py +11 -3
  12. pygpt_net/controller/painter/common.py +243 -13
  13. pygpt_net/controller/painter/painter.py +11 -2
  14. pygpt_net/core/assistants/files.py +18 -0
  15. pygpt_net/core/bridge/bridge.py +1 -5
  16. pygpt_net/core/bridge/context.py +81 -36
  17. pygpt_net/core/bridge/worker.py +3 -1
  18. pygpt_net/core/camera/camera.py +31 -402
  19. pygpt_net/core/camera/worker.py +430 -0
  20. pygpt_net/core/ctx/bag.py +4 -0
  21. pygpt_net/core/events/app.py +10 -17
  22. pygpt_net/core/events/base.py +17 -25
  23. pygpt_net/core/events/control.py +9 -17
  24. pygpt_net/core/events/event.py +9 -62
  25. pygpt_net/core/events/kernel.py +8 -17
  26. pygpt_net/core/events/realtime.py +8 -17
  27. pygpt_net/core/events/render.py +9 -17
  28. pygpt_net/core/filesystem/url.py +3 -0
  29. pygpt_net/core/render/web/body.py +454 -40
  30. pygpt_net/core/render/web/pid.py +39 -24
  31. pygpt_net/core/render/web/renderer.py +146 -40
  32. pygpt_net/core/text/utils.py +3 -0
  33. pygpt_net/data/config/config.json +4 -3
  34. pygpt_net/data/config/models.json +3 -3
  35. pygpt_net/data/config/settings.json +10 -5
  36. pygpt_net/data/css/web-blocks.css +3 -2
  37. pygpt_net/data/css/web-chatgpt.css +3 -1
  38. pygpt_net/data/css/web-chatgpt_wide.css +3 -1
  39. pygpt_net/data/locale/locale.de.ini +9 -7
  40. pygpt_net/data/locale/locale.en.ini +10 -6
  41. pygpt_net/data/locale/locale.es.ini +9 -7
  42. pygpt_net/data/locale/locale.fr.ini +9 -7
  43. pygpt_net/data/locale/locale.it.ini +9 -7
  44. pygpt_net/data/locale/locale.pl.ini +9 -7
  45. pygpt_net/data/locale/locale.uk.ini +9 -7
  46. pygpt_net/data/locale/locale.zh.ini +9 -7
  47. pygpt_net/item/assistant.py +13 -1
  48. pygpt_net/provider/api/google/__init__.py +46 -28
  49. pygpt_net/provider/api/openai/__init__.py +13 -10
  50. pygpt_net/provider/api/openai/store.py +45 -1
  51. pygpt_net/provider/core/config/patch.py +9 -0
  52. pygpt_net/provider/llms/google.py +4 -0
  53. pygpt_net/ui/dialog/assistant_store.py +213 -203
  54. pygpt_net/ui/layout/chat/input.py +3 -3
  55. pygpt_net/ui/layout/chat/painter.py +63 -4
  56. pygpt_net/ui/widget/draw/painter.py +715 -104
  57. pygpt_net/ui/widget/option/combo.py +5 -1
  58. pygpt_net/ui/widget/textarea/input.py +273 -3
  59. pygpt_net/ui/widget/textarea/web.py +2 -0
  60. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.35.dist-info}/METADATA +16 -2
  61. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.35.dist-info}/RECORD +64 -63
  62. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.35.dist-info}/LICENSE +0 -0
  63. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.35.dist-info}/WHEEL +0 -0
  64. {pygpt_net-2.6.33.dist-info → pygpt_net-2.6.35.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/chat/handler/stream_worker.py

@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.28 20:00:00 #
+ # Updated Date: 2025.09.04 00:00:00 #
  # ================================================== #

  import base64
@@ -14,6 +14,7 @@ import io
  import json
  from dataclasses import dataclass, field
  from typing import Optional, Literal, Any
+ from enum import Enum

  from PySide6.QtCore import QObject, Signal, Slot, QRunnable

@@ -40,16 +41,18 @@ EventType = Literal[
  "error",
  ]

- # Chunks
- ChunkType = Literal[
- "api_chat",
- "api_chat_responses",
- "api_completion",
- "langchain_chat",
- "llama_chat",
- "google",
- "raw",
- ]
+
+ class ChunkType(str, Enum):
+ """
+ Enum for chunk type classification.
+ """
+ API_CHAT = "api_chat"
+ API_CHAT_RESPONSES = "api_chat_responses"
+ API_COMPLETION = "api_completion"
+ LANGCHAIN_CHAT = "langchain_chat"
+ LLAMA_CHAT = "llama_chat"
+ GOOGLE = "google"
+ RAW = "raw"


  class WorkerSignals(QObject):
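
The ChunkType migration from a typing.Literal alias to a str-backed Enum keeps the old string values compatible: because every member also subclasses str, comparisons against plain strings still succeed. A minimal standalone sketch (the assertions are illustrative, not taken from the package):

from enum import Enum

class ChunkType(str, Enum):
    """Chunk type classification, mirroring the enum added in this release."""
    API_CHAT = "api_chat"
    RAW = "raw"

# str-backed members compare equal to their raw string values, so call sites
# that still pass or compare plain strings keep working after the migration:
assert ChunkType.RAW == "raw"
assert ChunkType("api_chat") is ChunkType.API_CHAT   # lookup by value
assert ChunkType.API_CHAT.value == "api_chat"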
@@ -64,10 +67,10 @@ class WorkerSignals(QObject):
  eventReady = Signal(object)


- @dataclass
+ @dataclass(slots=True)
  class WorkerState:
  """Holds mutable state for the streaming loop."""
- output_parts: list[str] = field(default_factory=list)
+ out: Optional[io.StringIO] = None
  output_tokens: int = 0
  begin: bool = True
  error: Optional[Exception] = None
@@ -81,7 +84,7 @@
  is_code: bool = False
  force_func_call: bool = False
  stopped: bool = False
- chunk_type: ChunkType = "raw"
+ chunk_type: ChunkType = ChunkType.RAW
  generator: Any = None
  usage_vendor: Optional[str] = None
  usage_payload: dict = field(default_factory=dict)
@@ -90,6 +93,8 @@


  class StreamWorker(QRunnable):
+ __slots__ = ("signals", "ctx", "window", "stream")
+
  def __init__(self, ctx: CtxItem, window, parent=None):
  super().__init__()
  self.signals = WorkerSignals()
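
Both @dataclass(slots=True) on WorkerState and the explicit __slots__ tuple on StreamWorker trade the per-instance __dict__ for fixed attribute slots, which lowers memory use and slightly speeds attribute access for objects created per streamed chunk; note that the slots=True dataclass parameter needs Python 3.10+. A small sketch of the effect (class names here are illustrative, not the project's):

from dataclasses import dataclass

@dataclass
class PlainState:
    tokens: int = 0

@dataclass(slots=True)
class SlottedState:
    tokens: int = 0

plain, slotted = PlainState(), SlottedState()
plain.extra = 1            # allowed: the instance carries a __dict__
try:
    slotted.extra = 1      # rejected: only the declared fields exist
except AttributeError:
    pass
assert not hasattr(SlottedState(), "__dict__")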
@@ -134,7 +139,7 @@
  if ctx.use_responses_api:
  if hasattr(chunk, 'type'):
  etype = chunk.type # type: ignore[assignment]
- state.chunk_type = "api_chat_responses"
+ state.chunk_type = ChunkType.API_CHAT_RESPONSES
  else:
  continue
  else:
@@ -199,23 +204,21 @@
  :param chunk: The chunk object from the stream
  :return: Detected ChunkType
  """
- if (hasattr(chunk, 'choices')
- and chunk.choices
- and hasattr(chunk.choices[0], 'delta')
- and chunk.choices[0].delta is not None):
- return "api_chat"
- if (hasattr(chunk, 'choices')
- and chunk.choices
- and hasattr(chunk.choices[0], 'text')
- and chunk.choices[0].text is not None):
- return "api_completion"
- if hasattr(chunk, 'content') and chunk.content is not None:
- return "langchain_chat"
- if hasattr(chunk, 'delta') and chunk.delta is not None:
- return "llama_chat"
- if hasattr(chunk, "candidates"): # Google python-genai chunk
- return "google"
- return "raw"
+ choices = getattr(chunk, 'choices', None)
+ if choices:
+ choice0 = choices[0] if len(choices) > 0 else None
+ if choice0 is not None and hasattr(choice0, 'delta') and choice0.delta is not None:
+ return ChunkType.API_CHAT
+ if choice0 is not None and hasattr(choice0, 'text') and choice0.text is not None:
+ return ChunkType.API_COMPLETION
+
+ if hasattr(chunk, 'content') and getattr(chunk, 'content') is not None:
+ return ChunkType.LANGCHAIN_CHAT
+ if hasattr(chunk, 'delta') and getattr(chunk, 'delta') is not None:
+ return ChunkType.LLAMA_CHAT
+ if hasattr(chunk, "candidates"):
+ return ChunkType.GOOGLE
+ return ChunkType.RAW

  def _append_response(
  self,
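
The refactored detector reads choices once with getattr and then inspects a single choice0, instead of re-walking chunk.choices[0] in every branch; the order of the checks is what classifies the chunk. A rough standalone sketch of the same duck-typing idea (the stub objects and string results are invented for illustration):

from types import SimpleNamespace

def detect(chunk) -> str:
    """Classify a streaming chunk by the attributes it happens to expose."""
    choices = getattr(chunk, "choices", None)
    if choices:
        choice0 = choices[0]
        if getattr(choice0, "delta", None) is not None:
            return "api_chat"
        if getattr(choice0, "text", None) is not None:
            return "api_completion"
    if getattr(chunk, "content", None) is not None:
        return "langchain_chat"
    if getattr(chunk, "delta", None) is not None:
        return "llama_chat"
    if hasattr(chunk, "candidates"):
        return "google"
    return "raw"

chat_like = SimpleNamespace(choices=[SimpleNamespace(delta="hi", text=None)])
assert detect(chat_like) == "api_chat"
assert detect(object()) == "raw"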
@@ -236,7 +239,10 @@
  """
  if state.begin and response == "":
  return
- state.output_parts.append(response)
+ # Use a single expandable buffer to avoid per-chunk list allocations
+ if state.out is None:
+ state.out = io.StringIO()
+ state.out.write(response)
  state.output_tokens += 1
  emit_event(
  RenderEvent(
@@ -307,9 +313,14 @@
  :param state: WorkerState
  :param emit_end: Function to emit end signal
  """
- # Build final output
- output = "".join(state.output_parts)
- state.output_parts.clear()
+ # Build final output from the incremental buffer
+ output = state.out.getvalue() if state.out is not None else ""
+ if state.out is not None:
+ try:
+ state.out.close()
+ except Exception:
+ pass
+ state.out = None

  if has_unclosed_code_tag(output):
  output += "\n```"
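
Accumulating the streamed text in one io.StringIO replaces the previous append-then-"".join() list pattern: each delta is written into a single growable buffer and the full output is read back once with getvalue() at the end. A minimal sketch of the pattern (the delta source is a stand-in, not the worker's stream):

import io

def collect(deltas) -> str:
    """Stream text deltas into one buffer and return the joined output."""
    out = None
    for delta in deltas:
        if out is None:           # allocate lazily, as the worker does
            out = io.StringIO()
        out.write(delta)
    result = out.getvalue() if out is not None else ""
    if out is not None:
        out.close()
    return result

assert collect(["Hel", "lo ", "world"]) == "Hello world"
assert collect([]) == ""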
@@ -336,6 +347,7 @@

  self.stream = None
  ctx.output = output
+ output = None # free ref

  # Tokens usage
  if state.usage_payload:
@@ -418,17 +430,17 @@
  :return: Response delta string or None
  """
  t = state.chunk_type
- if t == "api_chat":
+ if t == ChunkType.API_CHAT:
  return self._process_api_chat(ctx, state, chunk)
- if t == "api_chat_responses":
+ if t == ChunkType.API_CHAT_RESPONSES:
  return self._process_api_chat_responses(ctx, core, state, chunk, etype)
- if t == "api_completion":
+ if t == ChunkType.API_COMPLETION:
  return self._process_api_completion(chunk)
- if t == "langchain_chat":
+ if t == ChunkType.LANGCHAIN_CHAT:
  return self._process_langchain_chat(chunk)
- if t == "llama_chat":
+ if t == ChunkType.LLAMA_CHAT:
  return self._process_llama_chat(state, chunk)
- if t == "google":
+ if t == ChunkType.GOOGLE:
  return self._process_google_chunk(ctx, core, state, chunk)
  # raw fallback
  return self._process_raw(chunk)
@@ -1109,7 +1121,7 @@
  except Exception:
  pass

- # Bind to ctx on first discovery for compatibility with other parts of the app
+ # Bind to ctx on first discovery
  if state.citations and (ctx.urls is None or not ctx.urls):
  ctx.urls = list(state.citations)

pygpt_net/controller/config/placeholder.py

@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.26 19:00:00 #
+ # Updated Date: 2025.09.02 16:00:00 #
  # ================================================== #

  from typing import Dict, Any, List
@@ -29,42 +29,48 @@
  """
  self.window = window
  self._apply_handlers = {
- "presets": lambda p: self.get_presets(p),
- "modes": lambda p: self.get_modes(p),
- "models": lambda p: self.get_models(p),
- "languages": lambda p: self.get_languages(),
- "multimodal": lambda p: self.get_multimodal(p),
- "langchain_providers": lambda p: self.get_langchain_providers(),
- "llama_index_providers": lambda p: self.get_llama_index_providers(),
- "llm_providers": lambda p: self.get_llm_providers(),
- "embeddings_providers": lambda p: self.get_embeddings_providers(),
- "llama_index_loaders": lambda p: self.get_llama_index_loaders(),
- "llama_index_loaders_file": lambda p: self.get_llama_index_loaders(type="file"),
- "llama_index_loaders_web": lambda p: self.get_llama_index_loaders(type="web"),
- "llama_index_chat_modes": lambda p: self.get_llama_index_chat_modes(),
- "vector_storage": lambda p: self.get_vector_storage(),
- "var_types": lambda p: self.get_var_types(),
+ "access_actions": lambda p: self.get_access_actions(),
  "agent_modes": lambda p: self.get_agent_modes(),
  "agent_provider": lambda p: self.get_agent_providers(),
  "agent_provider_llama": lambda p: self.get_agent_providers_llama(),
  "agent_provider_openai": lambda p: self.get_agent_providers_openai(),
- "remote_tools_openai": lambda p: self.get_remote_tools_openai(),
- "syntax_styles": lambda p: self.get_syntax_styles(),
- "styles": lambda p: self.get_styles(),
+ "audio_input_backend": lambda p: self.get_audio_input_backend(),
+ "audio_input_devices": lambda p: self.get_audio_input_devices(),
+ "audio_output_backend": lambda p: self.get_audio_output_backend(),
+ "audio_output_devices": lambda p: self.get_audio_output_devices(),
+ "audio_tts_whisper_voices": lambda p: self.get_audio_tts_whisper_voices(),
+ "camera_devices": lambda p: self.get_camera_devices(),
+ "embeddings_providers": lambda p: self.get_embeddings_providers(),
  "idx": lambda p: self.get_idx(p),
  "keys": lambda p: self.get_keys(),
  "keys_modifiers": lambda p: self.get_modifiers(),
- "access_actions": lambda p: self.get_access_actions(),
+ "langchain_providers": lambda p: self.get_langchain_providers(),
+ "languages": lambda p: self.get_languages(),
+ "llama_index_chat_modes": lambda p: self.get_llama_index_chat_modes(),
+ "llama_index_loaders": lambda p: self.get_llama_index_loaders(),
+ "llama_index_loaders_file": lambda p: self.get_llama_index_loaders(type="file"),
+ "llama_index_loaders_web": lambda p: self.get_llama_index_loaders(type="web"),
+ "llama_index_providers": lambda p: self.get_llama_index_providers(),
+ "llm_providers": lambda p: self.get_llm_providers(),
+ "models": lambda p: self.get_models(p),
+ "modes": lambda p: self.get_modes(p),
+ "multimodal": lambda p: self.get_multimodal(p),
+ "presets": lambda p: self.get_presets(p),
+ "remote_tools_openai": lambda p: self.get_remote_tools_openai(),
  "speech_synthesis_actions": lambda p: self.get_speech_synthesis_actions(),
+ "styles": lambda p: self.get_styles(),
+ "syntax_styles": lambda p: self.get_syntax_styles(),
+ "vector_storage": lambda p: self.get_vector_storage(),
+ "var_types": lambda p: self.get_var_types(),
  "voice_control_actions": lambda p: self.get_voice_control_actions(),
- "audio_input_devices": lambda p: self.get_audio_input_devices(),
- "audio_output_devices": lambda p: self.get_audio_output_devices(),
- "audio_input_backend": lambda p: self.get_audio_input_backend(),
- "audio_output_backend": lambda p: self.get_audio_output_backend(),
- "audio_tts_whisper_voices": lambda p: self.get_audio_tts_whisper_voices(),
  }

  def _apply_combo_if_needed(self, item: Any):
+ """
+ Apply combo keys if needed
+
+ :param item: item to check
+ """
  if isinstance(item, dict) and item.get("type") == "combo":
  use = item.get("use")
  if use is not None:
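
The _apply_handlers mapping is a registry from placeholder id to a callable that builds the option list; apply_by_id presumably just looks the id up and invokes the handler with the extra params. A reduced sketch of that registry pattern (the handler entries and the empty-list fallback are illustrative, not the project's exact code):

from typing import Callable, Dict, List

class PlaceholderRegistry:
    def __init__(self) -> None:
        # id -> callable returning a list of {value: label} entries
        self._apply_handlers: Dict[str, Callable[[dict], List[Dict[str, str]]]] = {
            "var_types": lambda p: [{t: t} for t in ("str", "int", "bool")],
            "languages": lambda p: [{"en": "English"}, {"pl": "Polski"}],
        }

    def apply_by_id(self, id: str, params: dict = None) -> List[Dict[str, str]]:
        """Dispatch to the registered handler; unknown ids yield an empty list."""
        handler = self._apply_handlers.get(id)
        return handler(params or {}) if handler else []

registry = PlaceholderRegistry()
assert registry.apply_by_id("var_types") == [{"str": "str"}, {"int": "int"}, {"bool": "bool"}]
assert registry.apply_by_id("unknown") == []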
@@ -74,6 +80,11 @@
  item["keys"] = self.apply_by_id(use, params)

  def _apply_suboptions(self, mapping: Dict[str, Any]):
+ """
+ Apply placeholders to suboptions in mapping
+
+ :param mapping: Suboptions mapping
+ """
  for item in mapping.values():
  self._apply_combo_if_needed(item)

@@ -102,6 +113,7 @@

  :param id: Placeholder options id
  :param params: Additional parameters for specific placeholders
+ :return: Filled placeholder list
  """
  if params is None:
  params = {}
@@ -112,7 +124,7 @@
  """
  Get audio TTS whisper voices list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  voices = self.window.core.audio.whisper.get_voices()
  return [{v: v} for v in voices]
@@ -121,16 +133,24 @@
  """
  Get audio input devices list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  devices = self.window.core.audio.get_input_devices()
  return [{str(did): name} for did, name in devices]

+ def get_camera_devices(self) -> List[Dict[str, str]]:
+ """
+ Get camera devices list
+
+ :return: Filled placeholder list
+ """
+ return self.window.core.camera.get_devices()
+
  def get_audio_output_devices(self) -> List[Dict[str, str]]:
  """
  Get audio output devices list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  devices = self.window.core.audio.get_output_devices()
  return [{str(did): name} for did, name in devices]
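
Every getter, including the new get_camera_devices, returns the same shape the combo widgets consume: a list of single-entry dicts mapping the stored value to its display label, as the device getters above build with [{str(did): name} for did, name in devices]. A tiny illustration (the device tuples are made up):

devices = [(0, "Built-in camera"), (2, "USB capture card")]

# one {value: label} dict per entry; keys are stringified ids
keys = [{str(did): name} for did, name in devices]

assert keys == [{"0": "Built-in camera"}, {"2": "USB capture card"}]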
@@ -139,7 +159,7 @@
  """
  Get audio input backends list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  items = self.window.core.audio.get_input_backends()
  return [{str(i): name} for i, name in items]
@@ -148,25 +168,25 @@
  """
  Get audio output backends list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  items = self.window.core.audio.get_output_backends()
  return [{str(i): name} for i, name in items]

  def get_langchain_providers(self) -> List[Dict[str, str]]:
  """
- Get Langchain LLM provider placeholders list
+ Get Langchain LLM providers list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  choices = self.window.core.llm.get_choices(MODE_LANGCHAIN)
  return [{k: v} for k, v in choices.items()]

  def get_llama_index_providers(self) -> List[Dict[str, str]]:
  """
- Get Llama-index LLM provider placeholders list
+ Get Llama-index LLM providers list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  choices = self.window.core.llm.get_choices(MODE_LLAMA_INDEX)
  return [{k: v} for k, v in choices.items()]
@@ -175,49 +195,49 @@
  """
  Get all LLM provider placeholders list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  choices = self.window.core.llm.get_choices()
  return [{k: v} for k, v in choices.items()]

  def get_embeddings_providers(self) -> List[Dict[str, str]]:
  """
- Get embeddings placeholders list
+ Get embedding providers list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  choices = self.window.core.llm.get_choices("embeddings")
  return [{k: v} for k, v in choices.items()]

  def get_agent_providers(self) -> List[Dict[str, str]]:
  """
- Get all agent provider placeholders list
+ Get all agent providers list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  return self.window.core.agents.provider.get_choices()

  def get_agent_providers_llama(self) -> List[Dict[str, str]]:
  """
- Get Llama-index agent provider placeholders list
+ Get Llama-index agent provider list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  return self.window.core.agents.provider.get_choices(AGENT_TYPE_LLAMA)

  def get_agent_providers_openai(self) -> List[Dict[str, str]]:
  """
- Get OpenAI agent provider placeholders list
+ Get OpenAI agent provider list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  return self.window.core.agents.provider.get_choices(AGENT_TYPE_OPENAI)

  def get_remote_tools_openai(self) -> List[Dict[str, str]]:
  """
- Get OpenAI remote tools placeholders list
+ Get OpenAI remote tools list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  return self.window.core.api.openai.remote_tools.get_choices()

@@ -225,7 +245,7 @@
  """
  Get llama chat modes list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  return [
  {"best": "best"},
@@ -239,10 +259,10 @@

  def get_llama_index_loaders(self, type: str = "all") -> List[Dict[str, str]]:
  """
- Get data loaders placeholders list
+ Get data loaders list

  :param type: data type
- :return: placeholders list
+ :return: Filled placeholder list
  """
  data = []
  choices = self.window.controller.idx.common.get_loaders_choices()
@@ -261,28 +281,28 @@

  def get_vector_storage(self) -> List[Dict[str, str]]:
  """
- Get vector storage placeholders list
+ Get vector storage list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  ids = self.window.core.idx.storage.get_ids()
  return [{i: i} for i in ids]

  def get_var_types(self) -> List[Dict[str, str]]:
  """
- Get langchain placeholders list
+ Get var types list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  types = ["str", "int", "float", "bool", "dict", "list", "None"]
  return [{t: t} for t in types]

  def get_presets(self, params: dict = None) -> List[Dict[str, str]]:
  """
- Get presets placeholders list
+ Get presets list

  :param params: Additional parameters for specific placeholders
- :return: Presets placeholders list
+ :return: Filled placeholder list
  """
  if params is None:
  params = {}
@@ -293,10 +313,10 @@

  def get_modes(self, params: dict = None) -> List[Dict[str, str]]:
  """
- Get modes placeholders list
+ Get modes list

  :param params: Additional parameters for specific placeholders
- :return: Modes placeholders list
+ :return: Filled placeholder list
  """
  if params is None:
  params = {}
@@ -305,10 +325,10 @@

  def get_multimodal(self, params: dict = None) -> List[Dict[str, str]]:
  """
- Get multimodal placeholders list
+ Get multimodal options list

  :param params: Additional parameters for specific placeholders
- :return: multimodal placeholders list
+ :return: Filled placeholder list
  """
  if params is None:
  params = {}
@@ -317,10 +337,10 @@

  def get_models(self, params: dict = None) -> List[Dict[str, str]]:
  """
- Get models placeholders list (+ provider separators)
+ Get models list (+ provider separators)

  :param params: Additional parameters for specific placeholders
- :return: Models placeholders list
+ :return: Filled placeholder list
  """
  if params is None:
  params = {}
@@ -354,9 +374,9 @@

  def get_agent_modes(self) -> List[Dict[str, str]]:
  """
- Get agent/expert modes placeholders list
+ Get agent/expert modes list

- :return: Models placeholders list
+ :return: Filled placeholder list
  """
  modes = self.window.core.agents.legacy.get_allowed_modes()
  return [{mid: trans(f"mode.{mid}")} for mid in modes]
@@ -365,16 +385,16 @@
  """
  Get world languages list

- :return: Languages placeholders list
+ :return: Filled placeholder list
  """
  return self.window.core.text.get_language_choices()

  def get_idx(self, params: dict = None) -> List[Dict[str, str]]:
  """
- Get indexes placeholders list
+ Get indexes (LlamaIndex) list

  :param params: Additional parameters for specific placeholders
- :return: Indexes placeholders list
+ :return: Filled placeholder list
  """
  if params is None:
  params = {}
@@ -389,9 +409,9 @@

  def get_syntax_styles(self) -> List[Dict[str, str]]:
  """
- Get highlighter styles list
+ Get code syntax highlighter styles list

- :return: placeholders list
+ :return: Filled placeholder list
  """
  styles = self.window.controller.chat.render.web_renderer.body.highlight.get_styles()
  styles.sort()
@@ -399,9 +419,9 @@

  def get_styles(self) -> List[Dict[str, str]]:
  """
- Get styles list
+ Get styles list (blocks, chatgpt, etc.)

- :return: placeholders list
+ :return: Filled placeholder list
  """
  styles = self.window.controller.theme.common.get_styles_list()
  styles.sort()
@@ -409,9 +429,9 @@

  def get_keys(self) -> List[Dict[str, str]]:
  """
- Get keys
+ Get keyboard keys list

- :return: keys
+ :return: Filled placeholder list
  """
  return self.window.core.access.shortcuts.get_keys_choices()

@@ -419,7 +439,7 @@
  """
  Get modifiers

- :return: keys
+ :return: Filled placeholder list
  """
  return self.window.core.access.shortcuts.get_modifiers_choices()

@@ -432,7 +452,7 @@
  """
  Get access actions list

- :return: app actions list
+ :return: Filled placeholder list
  """
  choices = self.window.core.access.actions.get_access_choices()
  return self._translate_sort_choices(choices)
@@ -441,7 +461,7 @@
  """
  Get speech actions list

- :return: app actions list
+ :return: Filled placeholder list
  """
  choices = self.window.core.access.actions.get_speech_synthesis_choices()
  return self._translate_sort_choices(choices)
@@ -450,7 +470,7 @@
  """
  Get voice control actions list

- :return: app actions list
+ :return: Filled placeholder list
  """
  choices = self.window.core.access.actions.get_voice_control_choices()
  return self._translate_sort_choices(choices)
pygpt_net/controller/dialogs/confirm.py

@@ -6,7 +6,7 @@
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
  # MIT License #
  # Created By : Marcin Szczygliński #
- # Updated Date: 2025.08.09 19:00:00 #
+ # Updated Date: 2025.09.02 22:00:00 #
  # ================================================== #

  from typing import Any, Optional
@@ -204,6 +204,8 @@
  self.window.controller.assistant.batch.clear_store_files(id, True) # by store_id
  elif type == 'assistant.files.upload':
  self.window.controller.assistant.batch.upload(True)
+ elif type == 'assistant.file.delete':
+ self.window.controller.assistant.store.delete_file_by_idx(id, True)
  elif type == 'assistant.functions.import':
  self.window.controller.assistant.editor.import_functions(True)
  elif type == 'assistant.functions.clear':
pygpt_net/controller/media/media.py

@@ -80,12 +80,12 @@
  self.window.core.config.save()

  def disable_raw(self):
- """Disable prompt enhancement for images"""
+ """Disable prompt enhancement for media"""
  self.window.core.config.set('img_raw', False)
  self.window.core.config.save()

  def toggle_raw(self):
- """Save prompt enhancement option for images"""
+ """Save prompt enhancement option for media"""
  state = self.window.ui.config['global']['img_raw'].isChecked()
  if not state:
  self.disable_raw()
@@ -112,4 +112,12 @@
  current = self.window.core.config.get("model")
  model_data = self.window.core.models.get(current)
  if model_data:
- return model_data.is_video_output()
+ return model_data.is_video_output()
+
+ def play_video(self, path: str):
+ """
+ Play video file
+
+ :param path: path to video file
+ """
+ self.window.tools.get("player").play(path)