pygpt-net 2.6.65__py3-none-any.whl → 2.6.66__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. pygpt_net/CHANGELOG.txt +11 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +2 -0
  4. pygpt_net/controller/chat/chat.py +0 -0
  5. pygpt_net/controller/chat/handler/openai_stream.py +137 -7
  6. pygpt_net/controller/chat/render.py +0 -0
  7. pygpt_net/controller/config/field/checkbox_list.py +34 -1
  8. pygpt_net/controller/media/media.py +20 -1
  9. pygpt_net/controller/presets/presets.py +4 -1
  10. pygpt_net/controller/ui/mode.py +14 -10
  11. pygpt_net/controller/ui/ui.py +18 -1
  12. pygpt_net/core/image/image.py +34 -1
  13. pygpt_net/core/tabs/tabs.py +0 -0
  14. pygpt_net/core/types/image.py +61 -3
  15. pygpt_net/data/config/config.json +4 -3
  16. pygpt_net/data/config/models.json +629 -41
  17. pygpt_net/data/locale/locale.de.ini +4 -0
  18. pygpt_net/data/locale/locale.en.ini +4 -0
  19. pygpt_net/data/locale/locale.es.ini +4 -0
  20. pygpt_net/data/locale/locale.fr.ini +4 -0
  21. pygpt_net/data/locale/locale.it.ini +4 -0
  22. pygpt_net/data/locale/locale.pl.ini +4 -0
  23. pygpt_net/data/locale/locale.uk.ini +4 -0
  24. pygpt_net/data/locale/locale.zh.ini +4 -0
  25. pygpt_net/item/model.py +15 -19
  26. pygpt_net/provider/agents/openai/agent.py +0 -0
  27. pygpt_net/provider/api/google/__init__.py +20 -9
  28. pygpt_net/provider/api/google/image.py +161 -28
  29. pygpt_net/provider/api/google/video.py +73 -36
  30. pygpt_net/provider/api/openai/__init__.py +21 -11
  31. pygpt_net/provider/api/openai/agents/client.py +0 -0
  32. pygpt_net/provider/api/openai/video.py +562 -0
  33. pygpt_net/provider/core/config/patch.py +7 -0
  34. pygpt_net/provider/core/model/patch.py +29 -3
  35. pygpt_net/provider/vector_stores/qdrant.py +117 -0
  36. pygpt_net/ui/layout/toolbox/raw.py +7 -1
  37. pygpt_net/ui/widget/option/checkbox_list.py +14 -2
  38. {pygpt_net-2.6.65.dist-info → pygpt_net-2.6.66.dist-info}/METADATA +66 -25
  39. {pygpt_net-2.6.65.dist-info → pygpt_net-2.6.66.dist-info}/RECORD +37 -35
  40. {pygpt_net-2.6.65.dist-info → pygpt_net-2.6.66.dist-info}/LICENSE +0 -0
  41. {pygpt_net-2.6.65.dist-info → pygpt_net-2.6.66.dist-info}/WHEEL +0 -0
  42. {pygpt_net-2.6.65.dist-info → pygpt_net-2.6.66.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,14 @@
+2.6.66 (2025-12-25)
+
+- Added Sora 2 support - #155.
+- Added Nano Banana support.
+- Added Qdrant Vector Store - merged PR #147 by @Anush008.
+- Added models: gpt-5.2, gpt-image-1.5, gemini-3, nano-banana-pro, sora-2, claude-sonnet-4.5, claude-opus-4.5, veo-3.1.
+- Added Select/unselect All option in checkbox lists.
+- OpenAI SDK upgraded to 2.14.0, Anthropic SDK upgraded to 0.75.0, xAI SDK upgraded to 1.5.0, Google GenAI upgraded to 1.56.0, LlamaIndex upgraded to 0.14.10.
+- Fix: charset-normalizer 3.2.0 circular import - #152.
+- Fix: Google client closed state.
+
 2.6.65 (2025-09-28)

 - Added drag and drop functionality for files and directories from the filesystem in attachments and file explorer.
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.28 00:00:00 #
+# Updated Date: 2025.12.15 00:00:00 #
 # ================================================== #

 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2025, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.6.65"
-__build__ = "2025-09-28"
+__version__ = "2.6.66"
+__build__ = "2025-12-25"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/app.py CHANGED
@@ -146,6 +146,7 @@ from pygpt_net.provider.llms.open_router import OpenRouterLLM
 from pygpt_net.provider.vector_stores.chroma import ChromaProvider
 from pygpt_net.provider.vector_stores.elasticsearch import ElasticsearchProvider
 from pygpt_net.provider.vector_stores.pinecode import PinecodeProvider
+from pygpt_net.provider.vector_stores.qdrant import QdrantProvider
 from pygpt_net.provider.vector_stores.redis import RedisProvider
 from pygpt_net.provider.vector_stores.simple import SimpleProvider

@@ -475,6 +476,7 @@ def run(**kwargs):
     launcher.add_vector_store(ChromaProvider())
     launcher.add_vector_store(ElasticsearchProvider())
     launcher.add_vector_store(PinecodeProvider())
+    launcher.add_vector_store(QdrantProvider())
     launcher.add_vector_store(RedisProvider())
     launcher.add_vector_store(SimpleProvider())

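Note: the two hunks above follow the app's existing pattern for vector stores: import the provider class, then register one instance with the launcher. A minimal sketch of that pattern, with a simplified stand-in Launcher (the real launcher internals and the provider's id attribute are assumptions here, not taken from this diff):

# Simplified stand-in for the registration pattern; not the app's real classes.
class QdrantProvider:
    id = "qdrant"  # assumed key; providers register under an id in py-gpt

class Launcher:
    def __init__(self):
        self.vector_stores = {}

    def add_vector_store(self, provider):
        # one instance per provider, keyed by id
        self.vector_stores[provider.id] = provider

launcher = Launcher()
launcher.add_vector_store(QdrantProvider())
assert "qdrant" in launcher.vector_stores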
pygpt_net/controller/chat/chat.py
File without changes
pygpt_net/controller/chat/handler/openai_stream.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.05 00:00:00 #
+# Updated Date: 2025.12.26 00:00:00 #
 # ================================================== #

 import base64
@@ -17,6 +17,115 @@ from typing import Optional, Any
 from .utils import capture_openai_usage


+# v2: Support both dict and Pydantic objects returned by OpenAI Python SDK v2
+def _to_dict_safe(obj: Any) -> Optional[dict]:
+    """
+    Convert OpenAI SDK typed models (Pydantic) or plain objects to dict safely.
+
+    Returns:
+        dict or None
+    """
+    if obj is None:
+        return None
+    if isinstance(obj, dict):
+        return obj
+    # Pydantic v2
+    try:
+        if hasattr(obj, "model_dump"):
+            return obj.model_dump()
+    except Exception:
+        pass
+    # Pydantic v1 fallback
+    try:
+        if hasattr(obj, "dict"):
+            return obj.dict()
+    except Exception:
+        pass
+    # Generic best-effort
+    try:
+        return dict(obj)
+    except Exception:
+        pass
+    try:
+        return getattr(obj, "__dict__", None)
+    except Exception:
+        pass
+    return None
+
+
+# v2: Extract nested attribute or dict key chain (e.g. "url_citation.url")
+def _deep_get(obj: Any, path: str, default: Any = None) -> Any:
+    """
+    Best-effort nested getter that works for dicts and objects.
+    """
+    cur = obj
+    for part in path.split("."):
+        if cur is None:
+            return default
+        if isinstance(cur, dict):
+            cur = cur.get(part, None)
+        else:
+            cur = getattr(cur, part, None)
+    return cur if cur is not None else default
+
+
+# v2: Normalize annotation shape across SDK versions
+def _annotation_type(ann: Any) -> Optional[str]:
+    """
+    Return the annotation 'type' in a robust way.
+    """
+    t = getattr(ann, "type", None)
+    if t:
+        return t
+    if isinstance(ann, dict):
+        return ann.get("type")
+    # Try dictified view
+    ann_d = _to_dict_safe(ann)
+    if isinstance(ann_d, dict):
+        return ann_d.get("type")
+    return None
+
+
+# v2: Extract URL from url_citation annotation across shapes
+def _extract_url_from_annotation(ann: Any) -> Optional[str]:
+    """
+    Supports shapes:
+    - {"type":"url_citation","url":"..."}
+    - {"type":"url_citation","url_citation":{"url":"..."}}
+    - Typed models with attributes: ann.url OR ann.url_citation.url
+    """
+    # direct attribute
+    url = getattr(ann, "url", None)
+    if isinstance(url, str) and url:
+        return url
+
+    # dict direct
+    if isinstance(ann, dict):
+        url = ann.get("url")
+        if isinstance(url, str) and url:
+            return url
+        nested = ann.get("url_citation")
+        if isinstance(nested, dict):
+            url = nested.get("url")
+            if isinstance(url, str) and url:
+                return url
+
+    # typed nested or generic deep getters
+    for candidate in ("url_citation.url", "url_citation.href", "href", "source_url"):
+        url = _deep_get(ann, candidate)
+        if isinstance(url, str) and url:
+            return url
+
+    # try after dictify
+    ann_d = _to_dict_safe(ann)
+    if isinstance(ann_d, dict):
+        url = ann_d.get("url") or _deep_get(ann_d, "url_citation.url")
+        if isinstance(url, str) and url:
+            return url
+
+    return None
+
+
 def process_api_chat(ctx, state, chunk) -> Optional[str]:
     """
     OpenAI-compatible Chat Completions stream delta (robust to dict/object tool_calls).
@@ -196,17 +305,38 @@ def process_api_chat_responses(ctx, core, state, chunk, etype: Optional[str]) ->

     elif etype == "response.output_text.annotation.added":
         ann = chunk.annotation
-        if ann['type'] == "url_citation":
+
+        # v2: SDK v2 can return a typed model; support both dict and typed
+        a_type = _annotation_type(ann)
+
+        if a_type == "url_citation":
             if state.citations is None:
                 state.citations = []
-            url_citation = ann['url']
-            if url_citation not in state.citations:
+
+            # Extract URL across shapes and SDK versions
+            url_citation = _extract_url_from_annotation(ann)
+
+            if url_citation and url_citation not in state.citations:
                 state.citations.append(url_citation)
+
+            # keep ctx.urls always reflecting the current list
             ctx.urls = state.citations
-        elif ann['type'] == "container_file_citation":
+
+        elif a_type == "container_file_citation":
+            # container-created file (Code Interpreter)
+            ann_d = _to_dict_safe(ann) or {}
+            state.files.append({
+                "container_id": ann_d.get("container_id", _deep_get(ann, "container_id")),
+                "file_id": ann_d.get("file_id", _deep_get(ann, "file_id")),
+            })
+
+        elif a_type == "file_citation":
+            # v2: Some SDKs emit plain 'file_citation' (non-container). Keep parity with container handling.
+            ann_d = _to_dict_safe(ann) or {}
+            # optional: store as generic file citation (without container)
             state.files.append({
-                "container_id": ann['container_id'],
-                "file_id": ann['file_id'],
+                "container_id": ann_d.get("container_id", _deep_get(ann, "container_id")),  # may be None
+                "file_id": ann_d.get("file_id", _deep_get(ann, "file_id")),
             })

     elif etype == "response.reasoning_summary_text.delta":
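Note: the point of _annotation_type and _extract_url_from_annotation above is that OpenAI SDK v2 may stream the same annotation either as a plain dict or as a typed Pydantic model. A self-contained sketch (condensed logic for illustration, not the module itself) showing both shapes yielding the same URL:

from types import SimpleNamespace

def extract_url(ann):
    # condensed form of _extract_url_from_annotation, for illustration only
    if isinstance(ann, dict):
        return ann.get("url") or (ann.get("url_citation") or {}).get("url")
    url = getattr(ann, "url", None)
    if url:
        return url
    nested = getattr(ann, "url_citation", None)
    return getattr(nested, "url", None) if nested is not None else None

dict_shape = {"type": "url_citation", "url_citation": {"url": "https://example.com"}}
typed_shape = SimpleNamespace(type="url_citation", url=None,
                              url_citation=SimpleNamespace(url="https://example.com"))

# both shapes normalize to the same citation URL
assert extract_url(dict_shape) == extract_url(typed_shape) == "https://example.com"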
pygpt_net/controller/chat/render.py
File without changes
pygpt_net/controller/config/field/checkbox_list.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.15 23:00:00 #
+# Updated Date: 2025.12.25 20:00:00 #
 # ================================================== #

 from typing import Any, Dict, List
@@ -86,6 +86,39 @@ class CheckboxList:
         except Exception as e:
             self.window.core.debug.log(e)

+
+    def on_select_all(
+        self,
+        parent_id: str,
+        key: str,
+        option: dict
+    ):
+        """
+        Event: select all checkboxes
+
+        :param parent_id: Options parent ID
+        :param key: Option key
+        :param option: Option data
+        """
+        ui = self.window.ui
+        cfg_parent = ui.config.get(parent_id)
+        if not cfg_parent:
+            return
+        entry = cfg_parent.get(key)
+        if entry is None or not hasattr(entry, "boxes"):
+            return
+        boxes = entry.boxes
+
+        mode = "unselect_all"
+
+        for name, cb in boxes.items():
+            if cb is not None and not cb.isChecked():
+                mode = "select_all"
+
+        for name, cb in boxes.items():
+            if cb is not None:
+                cb.setChecked(mode == "select_all")
+
     def get_value(
         self,
pygpt_net/controller/media/media.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.01 23:00:00 #
+# Updated Date: 2025.12.25 20:00:00 #
 # ================================================== #

 from typing import Any
@@ -30,6 +30,15 @@ class Media:
         else:
            self.window.ui.config['global']['img_raw'].setChecked(False)

+        # mode (image|video|music)
+        mode = self.window.core.config.get('img_mode', 'image')
+        self.window.controller.config.apply_value(
+            parent_id="global",
+            key="img_mode",
+            option=self.window.core.image.get_mode_option(),
+            value=mode,
+        )
+
         # image: resolution
         resolution = self.window.core.config.get('img_resolution', '1024x1024')
         self.window.controller.config.apply_value(
@@ -51,6 +60,7 @@
         # -- add hooks --
         if not self.initialized:
             self.window.ui.add_hook("update.global.img_resolution", self.hook_update)
+            self.window.ui.add_hook("update.global.img_mode", self.hook_update)
             self.window.ui.add_hook("update.global.video.aspect_ratio", self.hook_update)

     def reload(self):
@@ -69,6 +79,11 @@
             if not value:
                 return
             self.window.core.config.set('img_resolution', value)
+        elif key == "img_mode":
+            if not value:
+                return
+            self.window.core.config.set('img_mode', value)
+            self.window.controller.ui.mode.update()  # switch image|video options
         elif key == "video.aspect_ratio":
             if not value:
                 return
@@ -92,6 +107,10 @@
         else:
             self.enable_raw()

+    def get_mode(self) -> str:
+        """Get media generation mode (image/video/music)"""
+        return self.window.core.config.get("img_mode", "image")
+
     def is_image_model(self) -> bool:
         """
         Check if the model is an image generation model
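Note: the media controller reuses one hook_update callback for every "update.global.<key>" hook and branches on the bare key; the new img_mode branch additionally triggers ui.mode.update() so the image/video option panels follow the selected mode. A minimal stand-in for that dispatch (not the app's hook API):

config = {}

def hook_update(key, value):
    # mirrors the branch structure above with plain dict storage
    if not value:
        return
    if key == "img_mode":
        config["img_mode"] = value
        print("ui.mode.update()  # re-render media option panels")
    elif key == "img_resolution":
        config["img_resolution"] = value

hook_update("img_mode", "video")  # persists the value, then re-renders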
pygpt_net/controller/presets/presets.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.26 03:00:00 #
+# Updated Date: 2025.12.25 20:00:00 #
 # ================================================== #

 import re
@@ -497,8 +497,10 @@

         :param no_scroll: do not scroll to current
         """
+        self.locked = True
         w = self.window
         if w.core.config.get('mode') == MODE_ASSISTANT:
+            self.locked = False
             return
         if no_scroll:
             w.ui.nodes['preset.presets'].store_scroll_position()
@@ -511,6 +513,7 @@
         if no_scroll:
             w.ui.nodes['preset.presets'].restore_scroll_position()
         self.on_changed()
+        self.locked = False

     def update_list(self):
         """Update presets list"""
pygpt_net/controller/ui/mode.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.27 15:00:00 #
+# Updated Date: 2025.12.25 20:00:00 #
 # ================================================== #

 from pygpt_net.core.types import (
@@ -39,7 +39,8 @@ class Mode:
     def update(self):
         """Update mode, model, preset and rest of the toolbox"""

-        mode = self.window.core.config.data['mode']
+        mode = self.window.core.config.get("mode")
+        model = self.window.core.config.get("model")

         ui_nodes = self.window.ui.nodes
         ui_tabs = self.window.ui.tabs
@@ -53,7 +54,7 @@
         is_agent_llama = mode == MODE_AGENT_LLAMA
         is_agent_openai = mode == MODE_AGENT_OPENAI
         is_expert = mode == MODE_EXPERT
-        is_image = mode == MODE_IMAGE
+        is_media = mode == MODE_IMAGE
         is_llama_index = mode == MODE_LLAMA_INDEX
         is_completion = mode == MODE_COMPLETION
         is_audio = mode == MODE_AUDIO
@@ -161,15 +162,18 @@
             ui_nodes['preset.editor.modes'].setVisible(True)
             ui_tabs['preset.editor.extra'].setTabText(0, trans("preset.prompt"))

-        # image options visibility
-        if is_image:
+        # media options visibility
+        if is_media:
             ui_nodes['media.raw'].setVisible(True)
-            if ctrl.media.is_video_model():
+            if ctrl.media.is_video_model() and ctrl.media.get_mode() == "video":
                 ui_nodes['video.options'].setVisible(True)
                 ui_nodes['dalle.options'].setVisible(False)
-            elif ctrl.media.is_image_model():
+            elif ctrl.media.is_image_model() and ctrl.media.get_mode() == "image":
                 ui_nodes['dalle.options'].setVisible(True)
                 ui_nodes['video.options'].setVisible(False)
+            elif ctrl.media.get_mode() == "music":
+                ui_nodes['dalle.options'].setVisible(False)
+                ui_nodes['video.options'].setVisible(False)
         else:
             ui_nodes['media.raw'].setVisible(False)
             ui_nodes['dalle.options'].setVisible(False)
@@ -199,7 +203,7 @@
         else:
             ui_nodes['idx.options'].setVisible(False)

-        if is_image:
+        if is_media:
             ui_nodes['input.stream'].setVisible(False)
         else:
             ui_nodes['input.stream'].setVisible(True)
@@ -213,13 +217,13 @@
         ui_tabs['input'].setTabVisible(1, show)

         # remote tools icon visibility
-        if not is_image and not is_completion:
+        if not is_media and not is_completion:
             self.window.ui.nodes['input'].set_icon_visible("web", True)
         else:
             self.window.ui.nodes['input'].set_icon_visible("web", False)

         ui_tabs['input'].setTabVisible(2, is_assistant)
-        ui_tabs['input'].setTabVisible(3, (not is_assistant) and (not is_image))
+        ui_tabs['input'].setTabVisible(3, (not is_assistant) and (not is_media))

         presets_editor.toggle_extra_options()

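Note: with the new get_mode() checks, panel visibility above depends on both the model's capability and the selected generation mode, and "music" hides both panels. The same decision expressed as a standalone function (an illustration; the real code toggles Qt widgets):

def media_panels(is_video_model, is_image_model, media_mode):
    # mirrors the if/elif chain above: at most one panel is shown
    show_video = is_video_model and media_mode == "video"
    show_image = (not show_video) and is_image_model and media_mode == "image"
    return {"video.options": show_video, "dalle.options": show_image}

print(media_panels(True, True, "video"))  # video panel only
print(media_panels(True, True, "music"))  # both hidden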
pygpt_net/controller/ui/ui.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.26 17:00:00 #
+# Updated Date: 2025.12.25 20:00:00 #
 # ================================================== #

 from typing import Optional
@@ -68,6 +68,7 @@ class UI:
         self.update_tokens()
         self.vision.update()
         self.window.controller.agent.legacy.update()
+        self.img_update_available_modes()
         self.img_update_available_resolutions()

     def handle(self, event: BaseEvent):
@@ -258,4 +259,20 @@
             key="img_resolution",
             option=self.window.core.image.get_resolution_option(),
             value=current,
+        )
+
+    def img_update_available_modes(self):
+        """Update available modes for images"""
+        mode = self.window.core.config.get('mode')
+        if mode != MODE_IMAGE:
+            return
+        model = self.window.core.config.get('model')
+        keys = self.window.core.image.get_available_modes(model)
+        current = self.window.core.config.get('img_mode', 'image')
+        self.window.ui.config['global']['img_mode'].set_keys(keys, lock=False)
+        self.window.controller.config.apply_value(
+            parent_id="global",
+            key="img_mode",
+            option=self.window.core.image.get_mode_option(),
+            value=current,
         )
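Note: img_update_available_modes rebuilds the mode combo's keys for the current model and then re-applies the stored img_mode value; as far as this diff shows, the stored value is not validated against the new key set. Sketched with plain dicts in place of the Qt widgets:

def update_modes(combo, available, stored):
    combo["keys"] = available   # set_keys(keys, lock=False)
    combo["value"] = stored     # apply_value(..., value=current)
    # a stored mode absent from `available` would still be applied here
    return combo

print(update_modes({}, {"image": "Image", "video": "Video"}, "video"))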
pygpt_net/core/image/image.py CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.01 23:00:00 #
+# Updated Date: 2025.12.25 20:00:00 #
 # ================================================== #

 import uuid
@@ -158,6 +158,39 @@ class Image(QObject):
             "keys": self.get_available_resolutions(),
         }

+    def get_mode_option(self) -> dict:
+        """
+        Get image mode option for UI
+
+        :return: dict
+        """
+        return {
+            "type": "combo",
+            "slider": True,
+            "label": "img_mode",
+            "value": "image",
+            "keys": self.get_available_modes(),
+        }
+
+    def get_available_modes(self, model_name: str = None) -> Dict[str, str]:
+        """
+        Get available modes (image / video)
+
+        :param model_name: model name
+        :return: dict of available modes
+        """
+        options = {}
+        if model_name:
+            model = self.window.core.models.get(model_name)
+            if model:
+                if model.is_image_output():
+                    options["image"] = trans("mode.img.image")
+                if model.is_video_output():
+                    options["video"] = trans("mode.img.video")
+                if model.is_music_output():
+                    options["music"] = trans("mode.img.music")
+        return options
+
     def get_available_resolutions(self, model: str = None) -> Dict[str, str]:
         """
         Get available image resolutions
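Note: get_available_modes above derives the combo entries from the model's output capabilities (is_image_output() / is_video_output() / is_music_output()). A self-contained sketch of the same capability-to-options mapping, with a hypothetical FakeModel standing in for the app's ModelItem:

from dataclasses import dataclass

@dataclass
class FakeModel:  # stand-in for the model item; flags are assumptions
    image: bool = False
    video: bool = False
    music: bool = False

def available_modes(model):
    # only modes the model can actually emit become combo entries
    labels = {"image": "Image", "video": "Video", "music": "Music"}
    flags = {"image": model.image, "video": model.video, "music": model.music}
    return {k: labels[k] for k, enabled in flags.items() if enabled}

print(available_modes(FakeModel(image=True, video=True)))
# {'image': 'Image', 'video': 'Video'}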
pygpt_net/core/tabs/tabs.py
File without changes
pygpt_net/core/types/image.py CHANGED
@@ -6,12 +6,20 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.01 23:00:00 #
+# Updated Date: 2025.12.25 20:00:00 #
 # ================================================== #

 VIDEO_AVAILABLE_ASPECT_RATIOS = {
-    "16:9": "16:9",
+    "1:1": "1:1",
+    "2:3": "2:3",
+    "3:2": "3:2",
+    "3:4": "3:4",
+    "4:3": "4:3",
+    "4:5": "4:5",
+    "5:4": "5:4",
     "9:16": "9:16",
+    "16:9": "16:9",
+    "21:9": "21:9",
 }

@@ -50,5 +58,55 @@ IMAGE_AVAILABLE_RESOLUTIONS = {
         "2560x1792": "2560x1792",
         "1536x2816": "1536x2816",
         "2816x1536": "2816x1536"
-    }
+    },
+    "nano-banana-pro": {
+        "2048x2048": "2048x2048",
+        "4096x4096": "4096x4096",
+        "1664x2496": "1664x2496",
+        "2496x1664": "2496x1664",
+        "3328x4992": "3328x4992",
+        "4992x3328": "4992x3328",
+        "1728x2368": "1728x2368",
+        "2368x1728": "2368x1728",
+        "3456x4736": "3456x4736",
+        "4736x3456": "4736x3456",
+        "1792x2304": "1792x2304",
+        "2304x1792": "2304x1792",
+        "3584x4608": "3584x4608",
+        "4608x3584": "4608x3584",
+        "1536x2688": "1536x2688",
+        "2688x1536": "2688x1536",
+        "3072x5376": "3072x5376",
+        "5376x3072": "5376x3072",
+        "3072x1344": "3072x1344",
+        "6144x2688": "6144x2688"
+    },
+    "nano-banana": {
+        "1024x1024": "1024x1024",
+        "832x1248": "832x1248",
+        "1248x832": "1248x832",
+        "864x1184": "864x1184",
+        "1184x864": "1184x864",
+        "896x1152": "896x1152",
+        "1152x896": "1152x896",
+        "768x1344": "768x1344",
+        "1344x768": "1344x768",
+        "1536x672": "1536x672"
+    },
+    "sora-2-pro": {
+        "1280x720": "1280x720",
+        "720x1280": "720x1280",
+        "1792x1024": "1792x1024",
+        "1024x1792": "1024x1792"
+    },
+    "sora-2": {
+        "1280x720": "1280x720",
+        "720x1280": "720x1280"
+    },
+    "veo-3": {
+        "1280x720": "1280x720",
+        "720x1280": "720x1280",
+        "1920x1080": "1920x1080",
+        "1080x1920": "1080x1920"
+    },
 }
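Note: IMAGE_AVAILABLE_RESOLUTIONS above is keyed by model name, with a per-model resolution map. A consumer would typically look up the exact model and fall back to a generic set when the model has no dedicated entry; a sketch of that lookup (the "default" fallback key is an assumption for illustration, not part of this diff):

RESOLUTIONS = {
    "sora-2": {"1280x720": "1280x720", "720x1280": "720x1280"},
    "default": {"1024x1024": "1024x1024"},  # hypothetical fallback set
}

def resolutions_for(model):
    # exact model entry first, then the generic fallback
    return RESOLUTIONS.get(model, RESOLUTIONS["default"])

print(list(resolutions_for("sora-2")))   # ['1280x720', '720x1280']
print(list(resolutions_for("unknown")))  # ['1024x1024']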
pygpt_net/data/config/config.json CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.65",
-        "app.version": "2.6.65",
-        "updated_at": "2025-09-28T00:00:00"
+        "version": "2.6.66",
+        "app.version": "2.6.66",
+        "updated_at": "2025-12-15T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -207,6 +207,7 @@
     "font_size.toolbox": 12,
     "frequency_penalty": 0.0,
     "func_call.native": true,
+    "img_mode": "image",
     "img_prompt_model": "gpt-4o",
     "img_raw": true,
     "img_resolution": "1024x1024",