pygpt-net 2.6.30__py3-none-any.whl → 2.6.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. pygpt_net/CHANGELOG.txt +8 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +4 -0
  4. pygpt_net/controller/__init__.py +5 -2
  5. pygpt_net/controller/audio/audio.py +25 -1
  6. pygpt_net/controller/audio/ui.py +2 -2
  7. pygpt_net/controller/chat/audio.py +1 -8
  8. pygpt_net/controller/chat/common.py +29 -3
  9. pygpt_net/controller/chat/handler/__init__.py +0 -0
  10. pygpt_net/controller/chat/handler/stream_worker.py +1124 -0
  11. pygpt_net/controller/chat/output.py +8 -3
  12. pygpt_net/controller/chat/stream.py +3 -1071
  13. pygpt_net/controller/chat/text.py +3 -2
  14. pygpt_net/controller/kernel/kernel.py +11 -3
  15. pygpt_net/controller/kernel/reply.py +5 -1
  16. pygpt_net/controller/realtime/__init__.py +12 -0
  17. pygpt_net/controller/realtime/manager.py +53 -0
  18. pygpt_net/controller/realtime/realtime.py +268 -0
  19. pygpt_net/controller/ui/mode.py +7 -0
  20. pygpt_net/controller/ui/ui.py +19 -1
  21. pygpt_net/core/audio/audio.py +6 -1
  22. pygpt_net/core/audio/backend/native/__init__.py +12 -0
  23. pygpt_net/core/audio/backend/{native.py → native/native.py} +426 -127
  24. pygpt_net/core/audio/backend/native/player.py +139 -0
  25. pygpt_net/core/audio/backend/native/realtime.py +250 -0
  26. pygpt_net/core/audio/backend/pyaudio/__init__.py +12 -0
  27. pygpt_net/core/audio/backend/pyaudio/playback.py +194 -0
  28. pygpt_net/core/audio/backend/pyaudio/pyaudio.py +923 -0
  29. pygpt_net/core/audio/backend/pyaudio/realtime.py +275 -0
  30. pygpt_net/core/audio/backend/pygame/__init__.py +12 -0
  31. pygpt_net/core/audio/backend/{pygame.py → pygame/pygame.py} +130 -19
  32. pygpt_net/core/audio/backend/shared/__init__.py +38 -0
  33. pygpt_net/core/audio/backend/shared/conversions.py +211 -0
  34. pygpt_net/core/audio/backend/shared/envelope.py +38 -0
  35. pygpt_net/core/audio/backend/shared/player.py +137 -0
  36. pygpt_net/core/audio/backend/shared/rt.py +52 -0
  37. pygpt_net/core/audio/capture.py +5 -0
  38. pygpt_net/core/audio/output.py +13 -2
  39. pygpt_net/core/audio/whisper.py +6 -2
  40. pygpt_net/core/bridge/bridge.py +2 -1
  41. pygpt_net/core/bridge/worker.py +4 -1
  42. pygpt_net/core/dispatcher/dispatcher.py +37 -1
  43. pygpt_net/core/events/__init__.py +2 -1
  44. pygpt_net/core/events/realtime.py +55 -0
  45. pygpt_net/core/image/image.py +51 -1
  46. pygpt_net/core/realtime/__init__.py +0 -0
  47. pygpt_net/core/realtime/options.py +87 -0
  48. pygpt_net/core/realtime/shared/__init__.py +0 -0
  49. pygpt_net/core/realtime/shared/audio.py +213 -0
  50. pygpt_net/core/realtime/shared/loop.py +64 -0
  51. pygpt_net/core/realtime/shared/session.py +59 -0
  52. pygpt_net/core/realtime/shared/text.py +37 -0
  53. pygpt_net/core/realtime/shared/tools.py +276 -0
  54. pygpt_net/core/realtime/shared/turn.py +38 -0
  55. pygpt_net/core/realtime/shared/types.py +16 -0
  56. pygpt_net/core/realtime/worker.py +164 -0
  57. pygpt_net/core/types/__init__.py +1 -0
  58. pygpt_net/core/types/image.py +48 -0
  59. pygpt_net/data/config/config.json +10 -4
  60. pygpt_net/data/config/models.json +149 -103
  61. pygpt_net/data/config/settings.json +50 -0
  62. pygpt_net/data/locale/locale.de.ini +5 -5
  63. pygpt_net/data/locale/locale.en.ini +19 -13
  64. pygpt_net/data/locale/locale.es.ini +5 -5
  65. pygpt_net/data/locale/locale.fr.ini +5 -5
  66. pygpt_net/data/locale/locale.it.ini +5 -5
  67. pygpt_net/data/locale/locale.pl.ini +5 -5
  68. pygpt_net/data/locale/locale.uk.ini +5 -5
  69. pygpt_net/data/locale/locale.zh.ini +1 -1
  70. pygpt_net/data/locale/plugin.audio_input.en.ini +4 -0
  71. pygpt_net/data/locale/plugin.audio_output.en.ini +4 -0
  72. pygpt_net/plugin/audio_input/plugin.py +37 -4
  73. pygpt_net/plugin/audio_input/simple.py +57 -8
  74. pygpt_net/plugin/cmd_files/worker.py +3 -0
  75. pygpt_net/provider/api/google/__init__.py +39 -6
  76. pygpt_net/provider/api/google/audio.py +8 -1
  77. pygpt_net/provider/api/google/chat.py +45 -6
  78. pygpt_net/provider/api/google/image.py +226 -86
  79. pygpt_net/provider/api/google/realtime/__init__.py +12 -0
  80. pygpt_net/provider/api/google/realtime/client.py +1945 -0
  81. pygpt_net/provider/api/google/realtime/realtime.py +186 -0
  82. pygpt_net/provider/api/openai/__init__.py +22 -2
  83. pygpt_net/provider/api/openai/realtime/__init__.py +12 -0
  84. pygpt_net/provider/api/openai/realtime/client.py +1828 -0
  85. pygpt_net/provider/api/openai/realtime/realtime.py +194 -0
  86. pygpt_net/provider/audio_input/google_genai.py +103 -0
  87. pygpt_net/provider/audio_output/google_genai_tts.py +229 -0
  88. pygpt_net/provider/audio_output/google_tts.py +0 -12
  89. pygpt_net/provider/audio_output/openai_tts.py +8 -5
  90. pygpt_net/provider/core/config/patch.py +15 -0
  91. pygpt_net/provider/core/model/patch.py +11 -0
  92. pygpt_net/provider/llms/google.py +8 -9
  93. pygpt_net/ui/layout/toolbox/footer.py +16 -0
  94. pygpt_net/ui/layout/toolbox/image.py +5 -0
  95. pygpt_net/ui/widget/option/combo.py +15 -1
  96. {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/METADATA +26 -14
  97. {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/RECORD +100 -62
  98. pygpt_net/core/audio/backend/pyaudio.py +0 -554
  99. {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/LICENSE +0 -0
  100. {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/WHEEL +0 -0
  101. {pygpt_net-2.6.30.dist-info → pygpt_net-2.6.31.dist-info}/entry_points.txt +0 -0
@@ -6,9 +6,10 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.28 20:00:00 #
9
+ # Updated Date: 2025.08.29 20:40:00 #
10
10
  # ================================================== #
11
11
 
12
+ import mimetypes
12
13
  from typing import Optional, Dict, Any, List
13
14
  from google import genai
14
15
  from google.genai import types as gtypes
@@ -37,12 +38,12 @@ class Image:
37
38
  sync: bool = True
38
39
  ) -> bool:
39
40
  """
40
- Generate image(s) using Google GenAI API
41
+ Generate or edit image(s) using Google GenAI API (Developer API or Vertex AI).
41
42
 
42
- :param context: BridgeContext
43
- :param extra: Extra parameters (num, inline)
44
- :param sync: Run synchronously if True
45
- :return: bool
43
+ :param context: BridgeContext with prompt, model, attachments
44
+ :param extra: extra parameters (num, inline)
45
+ :param sync: run synchronously (blocking) if True
46
+ :return: True if started
46
47
  """
47
48
  extra = extra or {}
48
49
  ctx = context.ctx or CtxItem()
@@ -51,6 +52,14 @@ class Image:
51
52
  num = int(extra.get("num", 1))
52
53
  inline = bool(extra.get("inline", False))
53
54
 
55
+ # decide sub-mode based on attachments
56
+ sub_mode = self.MODE_GENERATE
57
+ attachments = context.attachments
58
+ if attachments and len(attachments) > 0:
59
+ pass # TODO: implement edit!
60
+ # sub_mode = self.MODE_EDIT
61
+
62
+ # model used to improve the prompt (not image model)
54
63
  prompt_model = self.window.core.models.from_defaults()
55
64
  tmp = self.window.core.config.get('img_prompt_model')
56
65
  if self.window.core.models.has(tmp):
@@ -60,9 +69,11 @@ class Image:
60
69
  worker.window = self.window
61
70
  worker.client = self.window.core.api.google.get_client()
62
71
  worker.ctx = ctx
63
- worker.model = model.id
72
+ worker.mode = sub_mode
73
+ worker.attachments = attachments or {}
74
+ worker.model = model.id # image model id
64
75
  worker.input_prompt = prompt
65
- worker.model_prompt = prompt_model
76
+ worker.model_prompt = prompt_model # LLM for prompt rewriting
66
77
  worker.system_prompt = self.window.core.prompt.get('img')
67
78
  worker.raw = self.window.core.config.get('img_raw')
68
79
  worker.num = num
@@ -87,10 +98,10 @@ class Image:
87
98
 
88
99
 
89
100
  class ImageSignals(QObject):
90
- finished = Signal(object, list, str) # ctx, paths, prompt
101
+ finished = Signal(object, list, str) # ctx, paths, prompt
91
102
  finished_inline = Signal(object, list, str) # ctx, paths, prompt
92
- status = Signal(object) # message
93
- error = Signal(object) # exception
103
+ status = Signal(object) # message
104
+ error = Signal(object) # exception
94
105
 
95
106
 
96
107
  class ImageWorker(QRunnable):
@@ -100,7 +111,11 @@ class ImageWorker(QRunnable):
100
111
  self.window = None
101
112
  self.client: Optional[genai.Client] = None
102
113
  self.ctx: Optional[CtxItem] = None
103
- self.model = "imagen-4.0-generate-001"
114
+
115
+ # params
116
+ self.mode = Image.MODE_GENERATE
117
+ self.attachments: Dict[str, Any] = {}
118
+ self.model = "imagen-4.0-generate-preview-06-06"
104
119
  self.model_prompt = None
105
120
  self.input_prompt = ""
106
121
  self.system_prompt = ""
@@ -109,11 +124,17 @@ class ImageWorker(QRunnable):
109
124
  self.num = 1
110
125
  self.resolution = "1024x1024" # used to derive aspect ratio for Imagen
111
126
 
127
+ # limits
128
+ self.imagen_max_num = 4 # Imagen returns up to 4 images
129
+
130
+ # fallbacks
131
+ self.DEFAULT_GEMINI_IMAGE_MODEL = "gemini-2.0-flash-preview-image-generation"
132
+
112
133
  @Slot()
113
134
  def run(self):
114
135
  try:
115
- # Optional prompt enhancement
116
- if not self.raw and not not self.inline:
136
+ # optional prompt enhancement
137
+ if not self.raw and not self.inline:
117
138
  try:
118
139
  self.signals.status.emit(trans('img.status.prompt.wait'))
119
140
  bridge_context = BridgeContext(
@@ -135,68 +156,98 @@ class ImageWorker(QRunnable):
135
156
  self.signals.status.emit(trans('img.status.generating') + f": {self.input_prompt}...")
136
157
 
137
158
  paths: List[str] = []
138
- if self._is_imagen(self.model):
139
- # Imagen: generate_images
140
- resp = self._imagen_generate(self.input_prompt, self.num, self.resolution)
141
- imgs = getattr(resp, "generated_images", None) or []
142
- for idx, gi in enumerate(imgs[: self.num]):
143
- data = self._extract_imagen_bytes(gi)
144
- p = self._save(idx, data)
145
- if p:
146
- paths.append(p)
159
+
160
+ if self.mode == Image.MODE_EDIT:
161
+ # EDIT
162
+ if self._using_vertex():
163
+ # Vertex Imagen edit API (preferred)
164
+ resp = self._imagen_edit(self.input_prompt, self.attachments, self.num)
165
+ imgs = getattr(resp, "generated_images", None) or []
166
+ for idx, gi in enumerate(imgs[: self.num]):
167
+ data = self._extract_imagen_bytes(gi)
168
+ p = self._save(idx, data)
169
+ if p:
170
+ paths.append(p)
171
+ else:
172
+ # Developer API fallback via Gemini image model; force v1 to avoid 404
173
+ resp = self._gemini_edit(self.input_prompt, self.attachments, self.num)
174
+ saved = 0
175
+ for cand in getattr(resp, "candidates", []) or []:
176
+ parts = getattr(getattr(cand, "content", None), "parts", None) or []
177
+ for part in parts:
178
+ inline = getattr(part, "inline_data", None)
179
+ if inline and getattr(inline, "data", None):
180
+ p = self._save(saved, inline.data)
181
+ if p:
182
+ paths.append(p)
183
+ saved += 1
184
+ if saved >= self.num:
185
+ break
186
+ if saved >= self.num:
187
+ break
188
+
147
189
  else:
148
- # Gemini image preview: generate_content -> parts[].inline_data.data
149
- resp = self.client.models.generate_content(
150
- model=self.model,
151
- contents=[self.input_prompt],
152
- )
153
- from PIL import Image as PILImage
154
- from io import BytesIO
155
- cands = getattr(resp, "candidates", None) or []
156
- saved = 0
157
- for cand in cands:
158
- parts = getattr(getattr(cand, "content", None), "parts", None) or []
159
- for part in parts:
160
- inline = getattr(part, "inline_data", None)
161
- if inline and getattr(inline, "data", None):
162
- data = inline.data
163
- p = self._save(saved, data)
164
- if p:
165
- paths.append(p)
166
- saved += 1
167
- if saved >= self.num:
168
- break
169
- if saved >= self.num:
170
- break
190
+ # GENERATE
191
+ if self._is_imagen_generate(self.model) and self._using_vertex():
192
+ num = min(self.num, self.imagen_max_num)
193
+ resp = self._imagen_generate(self.input_prompt, num, self.resolution)
194
+ imgs = getattr(resp, "generated_images", None) or []
195
+ for idx, gi in enumerate(imgs[: num]):
196
+ data = self._extract_imagen_bytes(gi)
197
+ p = self._save(idx, data)
198
+ if p:
199
+ paths.append(p)
200
+ else:
201
+ # Gemini Developer API image generation (needs response_modalities)
202
+ resp = self.client.models.generate_content(
203
+ model=self.model,
204
+ contents=[self.input_prompt],
205
+ config=gtypes.GenerateContentConfig(
206
+ response_modalities=[gtypes.Modality.TEXT, gtypes.Modality.IMAGE],
207
+ ),
208
+ )
209
+ saved = 0
210
+ for cand in getattr(resp, "candidates", []) or []:
211
+ parts = getattr(getattr(cand, "content", None), "parts", None) or []
212
+ for part in parts:
213
+ inline = getattr(part, "inline_data", None)
214
+ if inline and getattr(inline, "data", None):
215
+ p = self._save(saved, inline.data)
216
+ if p:
217
+ paths.append(p)
218
+ saved += 1
219
+ if saved >= self.num:
220
+ break
221
+ if saved >= self.num:
222
+ break
171
223
 
172
224
  if self.inline:
173
225
  self.signals.finished_inline.emit(self.ctx, paths, self.input_prompt)
174
226
  else:
175
227
  self.signals.finished.emit(self.ctx, paths, self.input_prompt)
228
+
176
229
  except Exception as e:
177
230
  self.signals.error.emit(e)
178
231
  finally:
179
232
  self._cleanup()
180
233
 
181
- def _is_imagen(self, model_id: str) -> bool:
182
- """
183
- Check if model_id is an Imagen model
234
+ # ---------- helpers ----------
184
235
 
185
- :param model_id: Model ID
186
- :return: True if Imagen model
236
+ def _using_vertex(self) -> bool:
187
237
  """
188
- return "imagen" in str(model_id).lower()
189
-
190
- def _imagen_generate(self, prompt: str, num: int, resolution: str):
238
+ Detect if Vertex AI is configured via env vars.
191
239
  """
192
- Call Imagen generate_images with config (number_of_images, optional aspect_ratio).
240
+ val = os.getenv("GOOGLE_GENAI_USE_VERTEXAI") or ""
241
+ return str(val).lower() in ("1", "true", "yes", "y")
193
242
 
194
- :param prompt: Prompt text
195
- :param num: Number of images to generate
196
- :param resolution: Resolution string, e.g. "1024x1024"
197
- :return: GenerateImagesResponse
198
- """
199
- aspect = self._aspect_from_resolution(resolution) # "1:1", "3:4",
243
+ def _is_imagen_generate(self, model_id: str) -> bool:
244
+ """True for Imagen generate models."""
245
+ mid = str(model_id).lower()
246
+ return "imagen" in mid and "generate" in mid
247
+
248
+ def _imagen_generate(self, prompt: str, num: int, resolution: str):
249
+ """Imagen text-to-image."""
250
+ aspect = self._aspect_from_resolution(resolution)
200
251
  cfg = gtypes.GenerateImagesConfig(number_of_images=num)
201
252
  if aspect:
202
253
  cfg.aspect_ratio = aspect
@@ -206,32 +257,114 @@ class ImageWorker(QRunnable):
206
257
  config=cfg,
207
258
  )
208
259
 
209
- def _aspect_from_resolution(self, resolution: str) -> Optional[str]:
260
+ def _imagen_edit(self, prompt: str, attachments: Dict[str, Any], num: int):
261
+ """
262
+ Imagen edit: requires Vertex AI and capability model (e.g. imagen-3.0-capability-001).
263
+ First attachment = base image, optional second = mask.
210
264
  """
211
- Derive aspect ratio string from resolution.
265
+ paths = self._collect_attachment_paths(attachments)
266
+ if len(paths) == 0:
267
+ raise RuntimeError("No attachment provided for edit mode.")
268
+
269
+ base_img = gtypes.Image.from_file(location=paths[0])
270
+ raw_ref = gtypes.RawReferenceImage(reference_id=0, reference_image=base_img)
271
+
272
+ if len(paths) >= 2:
273
+ mask_img = gtypes.Image.from_file(location=paths[1])
274
+ mask_ref = gtypes.MaskReferenceImage(
275
+ reference_id=1,
276
+ reference_image=mask_img,
277
+ config=gtypes.MaskReferenceConfig(
278
+ mask_mode="MASK_MODE_USER_PROVIDED",
279
+ mask_dilation=0.0,
280
+ ),
281
+ )
282
+ edit_mode = "EDIT_MODE_INPAINT_INSERTION"
283
+ else:
284
+ mask_ref = gtypes.MaskReferenceImage(
285
+ reference_id=1,
286
+ reference_image=None,
287
+ config=gtypes.MaskReferenceConfig(
288
+ mask_mode="MASK_MODE_BACKGROUND",
289
+ mask_dilation=0.0,
290
+ ),
291
+ )
292
+ edit_mode = "EDIT_MODE_BGSWAP"
293
+
294
+ cfg = gtypes.EditImageConfig(
295
+ edit_mode=edit_mode,
296
+ number_of_images=min(num, self.imagen_max_num),
297
+ include_rai_reason=True,
298
+ )
299
+
300
+ # Ensure capability model for edit
301
+ model_id = "imagen-3.0-capability-001"
302
+ return self.client.models.edit_image(
303
+ model=model_id,
304
+ prompt=prompt,
305
+ reference_images=[raw_ref, mask_ref],
306
+ config=cfg,
307
+ )
212
308
 
213
- :param resolution: Resolution string, e.g. "1024x1024"
214
- :return: Aspect ratio string, e.g. "1:1", "3:4", or None if unknown
309
+ def _gemini_edit(self, prompt: str, attachments: Dict[str, Any], num: int):
215
310
  """
311
+ Gemini image-to-image editing via generate_content (Developer/Vertex depending on client).
312
+ The first attachment is used as the input image.
313
+ """
314
+ paths = self._collect_attachment_paths(attachments)
315
+ if len(paths) == 0:
316
+ raise RuntimeError("No attachment provided for edit mode.")
317
+
318
+ img_path = paths[0]
319
+ with open(img_path, "rb") as f:
320
+ img_bytes = f.read()
321
+ mime = self._guess_mime(img_path)
322
+
323
+ return self.client.models.generate_content(
324
+ model=self.model,
325
+ contents=[prompt, gtypes.Part.from_bytes(data=img_bytes, mime_type=mime)],
326
+ )
327
+
328
+ def _collect_attachment_paths(self, attachments: Dict[str, Any]) -> List[str]:
329
+ """Extract file paths from attachments dict."""
330
+ out: List[str] = []
331
+ for _, att in (attachments or {}).items():
332
+ try:
333
+ if getattr(att, "path", None) and os.path.exists(att.path):
334
+ out.append(att.path)
335
+ except Exception:
336
+ continue
337
+ return out
338
+
339
+ def _aspect_from_resolution(self, resolution: str) -> Optional[str]:
340
+ """Derive aspect ratio for Imagen."""
216
341
  try:
217
- w, h = [int(x) for x in resolution.lower().split("x")]
218
- # Reduce to small set supported in docs
219
- ratios = {(1, 1): "1:1", (3, 4): "3:4", (4, 3): "4:3", (9, 16): "9:16", (16, 9): "16:9"}
220
- # Find nearest
221
342
  from math import gcd
343
+ tolerance = 0.08
344
+ w_str, h_str = resolution.lower().replace("×", "x").split("x")
345
+ w, h = int(w_str.strip()), int(h_str.strip())
346
+ if w <= 0 or h <= 0:
347
+ return None
348
+ supported = {
349
+ "1:1": 1 / 1,
350
+ "3:4": 3 / 4,
351
+ "4:3": 4 / 3,
352
+ "9:16": 9 / 16,
353
+ "16:9": 16 / 9,
354
+ }
222
355
  g = gcd(w, h)
223
- key = (w // g, h // g)
224
- return ratios.get(key)
356
+ key = f"{w // g}:{h // g}"
357
+ if key in supported:
358
+ return key
359
+ r = w / h
360
+ best = min(supported.keys(), key=lambda k: abs(r - supported[k]))
361
+ rel_err = abs(r - supported[best]) / supported[best]
362
+ return best if rel_err <= tolerance else None
225
363
  except Exception:
226
364
  return None
227
365
 
228
366
  def _extract_imagen_bytes(self, generated_image) -> Optional[bytes]:
229
- """
230
- Extract bytes from Imagen generated image object.
231
-
232
- :param generated_image: GeneratedImage object
233
- :return: Image bytes or None
234
- """
367
+ """Extract bytes from Imagen GeneratedImage."""
235
368
  img = getattr(generated_image, "image", None)
236
369
  if not img:
237
370
  return None
@@ -243,7 +376,6 @@ class ImageWorker(QRunnable):
243
376
  return base64.b64decode(data)
244
377
  except Exception:
245
378
  return None
246
- # fallback: url/uri if present
247
379
  url = getattr(img, "url", None) or getattr(img, "uri", None)
248
380
  if url:
249
381
  try:
@@ -255,13 +387,7 @@ class ImageWorker(QRunnable):
255
387
  return None
256
388
 
257
389
  def _save(self, idx: int, data: Optional[bytes]) -> Optional[str]:
258
- """
259
- Save image bytes to file and return path.
260
-
261
- :param idx: Image index (for filename)
262
- :param data: Image bytes
263
- :return: Path string or None
264
- """
390
+ """Save image bytes to file and return path."""
265
391
  if not data:
266
392
  return None
267
393
  name = (
@@ -276,8 +402,22 @@ class ImageWorker(QRunnable):
276
402
  return path
277
403
  return None
278
404
 
405
+ def _guess_mime(self, path: str) -> str:
406
+ """
407
+ Guess MIME type for a local image file.
408
+ """
409
+ mime, _ = mimetypes.guess_type(path)
410
+ if mime:
411
+ return mime
412
+ ext = os.path.splitext(path.lower())[1]
413
+ if ext in ('.jpg', '.jpeg'):
414
+ return 'image/jpeg'
415
+ if ext == '.webp':
416
+ return 'image/webp'
417
+ return 'image/png'
418
+
279
419
  def _cleanup(self):
280
- """Cleanup resources"""
420
+ """Cleanup resources."""
281
421
  sig = self.signals
282
422
  self.signals = None
283
423
  if sig is not None:
@@ -0,0 +1,12 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.08.31 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from .realtime import Realtime